1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
43 * language dependent initialization variable
/* Callback used to produce a default value for an uninitialized local;
   NULL means no frontend-specific initialization is installed. */
45 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
47 /*** ******************************************** */
48 /** private interfaces, for professional use only */
50 /* Constructs a Block with a fixed number of predecessors.
51 Does not set current_block. Can not be used with automatic
52 Phi node construction. */
54 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Block nodes have no block of their own: pass NULL as the block argument. */
58 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are known up front, so the block is born mature. */
59 set_Block_matured(res, 1);
60 set_Block_block_visited(res, 0);
/* Exception handling / interprocedural-view attributes start out neutral. */
62 res->attr.block.exc = exc_normal;
63 res->attr.block.handler_entry = 0;
64 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
65 res->attr.block.in_cg = NULL;
66 res->attr.block.cg_backedge = NULL;
68 irn_vrfy_irg (res, irg);
/* Constructs a Start node (tuple of initial values: frame, args, memory). */
73 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
77 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
78 res->attr.start.irg = irg;
80 irn_vrfy_irg (res, irg);
/* Constructs an End node.  Arity -1: predecessors (keep-alives) are added
   dynamically later, see add_End_keepalive. */
85 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
89 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
91 irn_vrfy_irg (res, irg);
95 /* Creates a Phi node with all predecessors. Calling this constructor
96 is only allowed if the corresponding block is mature. */
98 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
102 bool has_unknown = false;
/* A Phi's arity must match the number of control-flow predecessors of
   its (mature) block. */
104 assert( get_Block_matured(block) );
105 assert( get_irn_arity(block) == arity );
107 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
109 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Skip optimization if any operand is still Unknown: the node may be
   rewritten once the real value is known. */
111 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
112 if (!has_unknown) res = optimize_node (res);
113 irn_vrfy_irg (res, irg);
115 /* Memory Phis in endless loops must be kept alive.
116 As we can't distinguish these easily we keep all of them alive. */
117 if ((res->op == op_Phi) && (mode == mode_M))
118 add_End_keepalive(irg->end, res);
/* Constructs a Const node for tarval 'con'.  Constants live in the given
   block (normally the start block) and are CSE'd by optimize_node. */
123 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
126 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
128 res = optimize_node (res);
129 irn_vrfy_irg (res, irg);
132 res = local_optimize_newby (res);
/* Constructs an Id node: a transparent forwarder for 'val'; normally
   removed immediately by optimize_node. */
139 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
144 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
145 res = optimize_node (res);
146 irn_vrfy_irg (res, irg);
/* Constructs a Proj node selecting result 'proj' from tuple node 'arg'. */
151 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
157 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
158 res->attr.proj = proj;
161 assert(get_Proj_pred(res));
162 assert(get_nodes_Block(get_Proj_pred(res)));
164 res = optimize_node (res);
166 irn_vrfy_irg (res, irg);
/* Constructs the default Proj of a Cond node (switch default case) and
   marks the Cond as 'fragmentary' with the given default projection. */
172 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
176 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
177 arg->attr.c.kind = fragmentary;
178 arg->attr.c.default_proj = max_proj;
179 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts 'op' to mode 'mode'. */
184 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
189 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
190 res = optimize_node (res);
191 irn_vrfy_irg (res, irg);
/* Constructs a Cast node: high-level type cast, mode is unchanged. */
196 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
199 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
200 res->attr.cast.totype = to_tp;
201 res = optimize_node (res);
202 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node grouping 'arity' values into one mode_T node. */
207 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
211 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
212 res = optimize_node (res);
213 irn_vrfy_irg (res, irg);
/* -- Arithmetic, logic, shift and compare constructors. --
   All follow the same pattern: build the node, run local optimization
   (constant folding / CSE may replace it), verify, return.
   Quot/DivMod/Div/Mod additionally take a memory operand and produce a
   mode_T tuple (memory result + data result(s) + exception control flow). */
218 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
219 ir_node *op1, ir_node *op2, ir_mode *mode)
225 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
226 res = optimize_node (res);
227 irn_vrfy_irg (res, irg);
232 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
233 ir_node *op1, ir_node *op2, ir_mode *mode)
239 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
240 res = optimize_node (res);
241 irn_vrfy_irg (res, irg);
246 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
247 ir_node *op, ir_mode *mode)
252 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
253 res = optimize_node (res);
254 irn_vrfy_irg (res, irg);
259 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
260 ir_node *op1, ir_node *op2, ir_mode *mode)
266 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
267 res = optimize_node (res);
268 irn_vrfy_irg (res, irg);
273 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
274 ir_node *memop, ir_node *op1, ir_node *op2)
281 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
282 res = optimize_node (res);
283 irn_vrfy_irg (res, irg);
288 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *memop, ir_node *op1, ir_node *op2)
296 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
297 res = optimize_node (res);
298 irn_vrfy_irg (res, irg);
303 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
304 ir_node *memop, ir_node *op1, ir_node *op2)
311 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
312 res = optimize_node (res);
313 irn_vrfy_irg (res, irg);
318 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
319 ir_node *memop, ir_node *op1, ir_node *op2)
326 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
327 res = optimize_node (res);
328 irn_vrfy_irg (res, irg);
333 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
334 ir_node *op1, ir_node *op2, ir_mode *mode)
340 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
341 res = optimize_node (res);
342 irn_vrfy_irg (res, irg);
347 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *op1, ir_node *op2, ir_mode *mode)
354 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
355 res = optimize_node (res);
356 irn_vrfy_irg (res, irg);
361 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
362 ir_node *op1, ir_node *op2, ir_mode *mode)
368 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
369 res = optimize_node (res);
370 irn_vrfy_irg (res, irg);
375 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
376 ir_node *op, ir_mode *mode)
381 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
382 res = optimize_node (res);
383 irn_vrfy_irg (res, irg);
388 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_node *k, ir_mode *mode)
395 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
396 res = optimize_node (res);
397 irn_vrfy_irg (res, irg);
402 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
403 ir_node *op, ir_node *k, ir_mode *mode)
409 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
410 res = optimize_node (res);
411 irn_vrfy_irg (res, irg);
416 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
417 ir_node *op, ir_node *k, ir_mode *mode)
423 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
424 res = optimize_node (res);
425 irn_vrfy_irg (res, irg);
430 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
431 ir_node *op, ir_node *k, ir_mode *mode)
437 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
438 res = optimize_node (res);
439 irn_vrfy_irg (res, irg);
444 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
445 ir_node *op, ir_mode *mode)
450 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
451 res = optimize_node (res);
452 irn_vrfy_irg (res, irg);
/* Cmp produces a mode_T tuple; individual relations are selected by Proj. */
457 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *op1, ir_node *op2)
464 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
465 res = optimize_node (res);
466 irn_vrfy_irg (res, irg);
/* Constructs an unconditional Jmp (control flow, mode_X). */
471 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
474 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
475 res = optimize_node (res);
476 irn_vrfy_irg (res, irg);
/* Constructs a Cond on selector 'c'; starts as a 'dense' switch with
   default projection 0 (see new_rd_defaultProj for the fragmentary case). */
481 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
486 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
487 res->attr.c.kind = dense;
488 res->attr.c.default_proj = 0;
489 res = optimize_node (res);
490 irn_vrfy_irg (res, irg);
/* Constructs a Call.  The real in-array is [store, callee, arg0..argN-1],
   hence the copy into r_in at offset 2. */
495 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
496 ir_node *callee, int arity, ir_node **in, type *tp)
503 NEW_ARR_A (ir_node *, r_in, r_arity);
506 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
508 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* 'tp' is the method type of the callee. */
510 assert(is_method_type(tp));
511 set_Call_type(res, tp);
512 res->attr.call.callee_arr = NULL;
513 res = optimize_node (res);
514 irn_vrfy_irg (res, irg);
/* Constructs a Return.  In-array is [store, res0..resN-1] (copy offset 1). */
519 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
520 ir_node *store, int arity, ir_node **in)
527 NEW_ARR_A (ir_node *, r_in, r_arity);
529 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
530 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
531 res = optimize_node (res);
532 irn_vrfy_irg (res, irg);
/* Constructs a Raise: throws exception object 'obj', threading 'store'. */
537 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
543 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
544 res = optimize_node (res);
545 irn_vrfy_irg (res, irg);
/* Constructs a Load from address 'adr'; mode_T tuple (memory, value, X). */
550 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
551 ir_node *store, ir_node *adr)
557 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
559 res = optimize_node (res);
560 irn_vrfy_irg (res, irg);
/* Constructs a Store of 'val' to address 'adr'. */
565 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
566 ir_node *store, ir_node *adr, ir_node *val)
573 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
575 res = optimize_node (res);
577 irn_vrfy_irg (res, irg);
/* Constructs an Alloc of 'size' bytes of 'alloc_type' on stack or heap. */
582 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
583 ir_node *size, type *alloc_type, where_alloc where)
589 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
591 res->attr.a.where = where;
592 res->attr.a.type = alloc_type;
594 res = optimize_node (res);
595 irn_vrfy_irg (res, irg);
/* Constructs a Free of object 'ptr' with 'size' bytes of 'free_type'. */
600 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
601 ir_node *ptr, ir_node *size, type *free_type)
608 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
610 res->attr.f = free_type;
612 res = optimize_node (res);
613 irn_vrfy_irg (res, irg);
/* Constructs a Sel: address computation for entity 'ent' within the object
   at 'objptr'.  In-array is [store, objptr, index0..indexN-1] (offset 2). */
618 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
619 int arity, ir_node **in, entity *ent)
626 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
629 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
630 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
632 res->attr.s.ent = ent;
634 res = optimize_node (res);
635 irn_vrfy_irg (res, irg);
/* Constructs an InstOf node: runtime type test of the object at 'objptr'
   against type 'ent'.  Not optimized/verified yet (see commented calls). */
640 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
641 ir_node *objptr, type *ent)
648 NEW_ARR_A (ir_node *, r_in, r_arity);
/* BUGFIX: this constructor built the node with op_Sel, but it is an InstOf
   node -- its attributes are written through attr.io below.  Using op_Sel
   makes opcode-based dispatch (get_irn_op etc.) misidentify the node and
   read attr.s where attr.io was written.  Use op_InstOf. */
652 res = new_ir_node (db, irg, block, op_InstOf, mode_T, r_arity, r_in);
654 res->attr.io.ent = ent;
656 /* res = optimize (res);
657 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst: a symbolic constant (linkage pointer, type tag,
   or type size), depending on 'symkind'. */
662 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
663 symconst_kind symkind)
667 if (symkind == linkage_ptr_info)
671 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
673 res->attr.i.num = symkind;
/* 'value' is a union-like type_or_id_p: an ident for linkage info,
   otherwise a type (asserted below). */
674 if (symkind == linkage_ptr_info) {
675 res->attr.i.tori.ptrinfo = (ident *)value;
677 assert ( ( (symkind == type_tag)
678 || (symkind == size))
679 && (is_type(value)));
680 res->attr.i.tori.typ = (type *)value;
682 res = optimize_node (res);
683 irn_vrfy_irg (res, irg);
/* Constructs a Sync: merges 'arity' memory values into one (mode_M). */
688 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
692 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
694 res = optimize_node (res);
695 irn_vrfy_irg (res, irg);
/* Returns the graph's unique Bad node (placeholder for dead code). */
700 new_rd_Bad (ir_graph *irg)
/* Returns the graph's unique Unknown node (value not yet known). */
706 new_rd_Unknown (ir_graph *irg)
/* -- Interprocedural-view constructors. -- */
/* Constructs a CallBegin for 'call'; its single input is the callee
   pointer of that Call. */
712 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
716 in[0] = get_Call_ptr(call);
717 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
718 res->attr.callbegin.irg = irg;
719 res->attr.callbegin.call = call;
720 res = optimize_node (res);
721 irn_vrfy_irg (res, irg);
/* Constructs an EndReg (regular procedure end, interprocedural view).
   Arity -1: keep-alive predecessors are added dynamically. */
726 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
730 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
731 res->attr.end.irg = irg;
733 irn_vrfy_irg (res, irg);
/* Constructs an EndExcept (exceptional procedure end). */
738 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
742 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
743 res->attr.end.irg = irg;
745 irn_vrfy_irg (res, irg);
/* Constructs a Break: jump out of an interprocedural region. */
750 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
753 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
754 res = optimize_node (res);
755 irn_vrfy_irg (res, irg);
/* Constructs a Filter: the interprocedural analogue of Proj, selecting
   result 'proj' from 'arg'; in_cg/backedge filled in later. */
760 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
766 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
767 res->attr.filter.proj = proj;
768 res->attr.filter.in_cg = NULL;
769 res->attr.filter.backedge = NULL;
772 assert(get_Proj_pred(res));
773 assert(get_nodes_Block(get_Proj_pred(res)));
775 res = optimize_node (res);
777 irn_vrfy_irg (res, irg);
/* -- Convenience wrappers: each new_r_* constructor is the corresponding
   new_rd_* constructor called without debug information (db == NULL). -- */
782 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
783 return new_rd_Block(NULL, irg, arity, in);
785 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
786 return new_rd_Start(NULL, irg, block);
788 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
789 return new_rd_End(NULL, irg, block);
791 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
792 return new_rd_Jmp(NULL, irg, block);
794 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
795 return new_rd_Cond(NULL, irg, block, c);
797 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
798 ir_node *store, int arity, ir_node **in) {
799 return new_rd_Return(NULL, irg, block, store, arity, in);
801 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
802 ir_node *store, ir_node *obj) {
803 return new_rd_Raise(NULL, irg, block, store, obj);
805 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
806 ir_mode *mode, tarval *con) {
807 return new_rd_Const(NULL, irg, block, mode, con);
809 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
810 type_or_id_p value, symconst_kind symkind) {
811 return new_rd_SymConst(NULL, irg, block, value, symkind);
813 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
814 ir_node *objptr, int n_index, ir_node **index,
816 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
818 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
820 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
822 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
823 ir_node *callee, int arity, ir_node **in,
825 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
827 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
828 ir_node *op1, ir_node *op2, ir_mode *mode) {
829 return new_rd_Add(NULL, irg, block, op1, op2, mode);
831 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
832 ir_node *op1, ir_node *op2, ir_mode *mode) {
833 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
835 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
836 ir_node *op, ir_mode *mode) {
837 return new_rd_Minus(NULL, irg, block, op, mode);
839 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
840 ir_node *op1, ir_node *op2, ir_mode *mode) {
841 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
843 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
844 ir_node *memop, ir_node *op1, ir_node *op2) {
845 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
847 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
848 ir_node *memop, ir_node *op1, ir_node *op2) {
849 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
851 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
852 ir_node *memop, ir_node *op1, ir_node *op2) {
853 return new_rd_Div(NULL, irg, block, memop, op1, op2);
855 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
856 ir_node *memop, ir_node *op1, ir_node *op2) {
857 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
859 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
860 ir_node *op, ir_mode *mode) {
861 return new_rd_Abs(NULL, irg, block, op, mode);
863 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
864 ir_node *op1, ir_node *op2, ir_mode *mode) {
865 return new_rd_And(NULL, irg, block, op1, op2, mode);
867 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
868 ir_node *op1, ir_node *op2, ir_mode *mode) {
869 return new_rd_Or(NULL, irg, block, op1, op2, mode);
871 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
872 ir_node *op1, ir_node *op2, ir_mode *mode) {
873 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
875 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
876 ir_node *op, ir_mode *mode) {
877 return new_rd_Not(NULL, irg, block, op, mode);
879 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
880 ir_node *op1, ir_node *op2) {
881 return new_rd_Cmp(NULL, irg, block, op1, op2);
883 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
884 ir_node *op, ir_node *k, ir_mode *mode) {
885 return new_rd_Shl(NULL, irg, block, op, k, mode);
887 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
888 ir_node *op, ir_node *k, ir_mode *mode) {
889 return new_rd_Shr(NULL, irg, block, op, k, mode);
891 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
892 ir_node *op, ir_node *k, ir_mode *mode) {
893 return new_rd_Shrs(NULL, irg, block, op, k, mode);
895 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
896 ir_node *op, ir_node *k, ir_mode *mode) {
897 return new_rd_Rot(NULL, irg, block, op, k, mode);
899 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
900 ir_node *op, ir_mode *mode) {
901 return new_rd_Conv(NULL, irg, block, op, mode);
903 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
904 return new_rd_Cast(NULL, irg, block, op, to_tp);
906 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
907 ir_node **in, ir_mode *mode) {
908 return new_rd_Phi(NULL, irg, block, arity, in, mode);
910 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
911 ir_node *store, ir_node *adr) {
912 return new_rd_Load(NULL, irg, block, store, adr);
914 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
915 ir_node *store, ir_node *adr, ir_node *val) {
916 return new_rd_Store(NULL, irg, block, store, adr, val);
918 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
919 ir_node *size, type *alloc_type, where_alloc where) {
920 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
922 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
923 ir_node *ptr, ir_node *size, type *free_type) {
924 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
926 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
927 return new_rd_Sync(NULL, irg, block, arity, in);
929 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
930 ir_mode *mode, long proj) {
931 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
933 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
935 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
937 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
938 int arity, ir_node **in) {
939 return new_rd_Tuple(NULL, irg, block, arity, in );
941 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
942 ir_node *val, ir_mode *mode) {
943 return new_rd_Id(NULL, irg, block, val, mode);
945 INLINE ir_node *new_r_Bad (ir_graph *irg) {
946 return new_rd_Bad(irg);
948 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
949 return new_rd_Unknown(irg);
951 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
952 return new_rd_CallBegin(NULL, irg, block, callee);
954 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
955 return new_rd_EndReg(NULL, irg, block);
957 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
958 return new_rd_EndExcept(NULL, irg, block);
960 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
961 return new_rd_Break(NULL, irg, block);
963 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
964 ir_mode *mode, long proj) {
965 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
969 /** ********************/
970 /** public interfaces */
971 /** construction tools */
975 * - create a new Start node in the current block
977 * @return s - pointer to the created Start node
/* Like new_rd_Start, but uses the graph/block state of current_ir_graph. */
982 new_d_Start (dbg_info* db)
986 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
987 op_Start, mode_T, 0, NULL);
988 res->attr.start.irg = current_ir_graph;
990 res = optimize_node (res);
991 irn_vrfy_irg (res, current_ir_graph);
/* Like new_rd_End, but in the current block of current_ir_graph. */
996 new_d_End (dbg_info* db)
999 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1000 op_End, mode_X, -1, NULL);
1001 res = optimize_node (res);
1002 irn_vrfy_irg (res, current_ir_graph);
1007 /* Constructs a Block with a fixed number of predecessors.
1008 Does set current_block. Can be used with automatic Phi
1009 node construction. */
1011 new_d_Block (dbg_info* db, int arity, ir_node **in)
1015 bool has_unknown = false;
1017 res = new_rd_Block (db, current_ir_graph, arity, in);
1019 /* Create and initialize array for Phi-node construction. */
/* graph_arr[pos] caches the current value of local variable 'pos' in this
   block; n_loc slots, all initially NULL (no value known yet). */
1020 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1021 current_ir_graph->n_loc);
1022 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Defer optimization while any predecessor is still Unknown. */
1024 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1026 if (!has_unknown) res = optimize_node (res);
1027 current_ir_graph->current_block = res;
1029 irn_vrfy_irg (res, current_ir_graph);
1034 /* ***********************************************************************/
1035 /* Methods necessary for automatic Phi node creation */
1037 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1038 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1039 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1040 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1042 Call Graph: ( A ---> B == A "calls" B)
1044 get_value mature_block
1052 get_r_value_internal |
1056 new_rd_Phi0 new_rd_Phi_in
1058 * *************************************************************************** */
1060 /* Creates a Phi node with 0 predecessors */
/* Dummy Phi for an immature block; completed (given real predecessors)
   when the block is matured.  Built without debug info and without
   optimization -- it must survive as a placeholder. */
1061 static INLINE ir_node *
1062 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1065 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1066 irn_vrfy_irg (res, irg);
1070 /* There are two implementations of the Phi node construction. The first
1071 is faster, but does not work for blocks with more than 2 predecessors.
1072 The second works always but is slower and causes more unnecessary Phi
1074 Select the implementations by the following preprocessor flag set in
1076 #if USE_FAST_PHI_CONSTRUCTION
1078 /* This is a stack used for allocating and deallocating nodes in
1079 new_rd_Phi_in. The original implementation used the obstack
1080 to model this stack, now it is explicit. This reduces side effects.
1082 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack (heap-allocated; the node array
   is a flexible ARR_F). */
1083 INLINE Phi_in_stack *
1084 new_Phi_in_stack() {
1087 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1089 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases a Phi_in stack created by new_Phi_in_stack. */
1096 free_Phi_in_stack(Phi_in_stack *s) {
1097 DEL_ARR_F(s->stack);
/* Returns a discarded Phi node to the stack for later reuse: append if
   the stack is full, otherwise overwrite the slot at 'pos'. */
1101 free_to_Phi_in_stack(ir_node *phi) {
1102 assert(get_irn_opcode(phi) == iro_Phi);
1104 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1105 current_ir_graph->Phi_in_stack->pos)
1106 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1108 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1110 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: either popped from the Phi_in stack and re-initialized
   with the given block/mode/predecessors, or freshly allocated if the stack
   is empty. */
1113 static INLINE ir_node *
1114 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1115 int arity, ir_node **in) {
1117 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1118 int pos = current_ir_graph->Phi_in_stack->pos;
1122 /* We need to allocate a new node */
/* BUGFIX: 'db' is not a parameter of this function and is not in scope.
   The stack-recycled Phi carries no debug info, so pass NULL -- exactly
   as new_rd_Phi_in does on its non-stack code path. */
1123 res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
1124 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1126 /* reuse the old node and initialize it again. */
1129 assert (res->kind == k_ir_node);
1130 assert (res->op == op_Phi);
1134 assert (arity >= 0);
1135 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer; predecessors start at in[1]. */
1136 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1138 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1140 (current_ir_graph->Phi_in_stack->pos)--;
1146 /* Creates a Phi node with a given, fixed array **in of predecessors.
1147 If the Phi node is unnecessary, as the same value reaches the block
1148 through all control flow paths, it is eliminated and the value
1149 returned directly. This constructor is only intended for use in
1150 the automatic Phi node generation triggered by get_value or mature.
1151 The implementation is quite tricky and depends on the fact, that
1152 the nodes are allocated on a stack:
1153 The in array contains predecessors and NULLs. The NULLs appear,
1154 if get_r_value_internal, that computed the predecessors, reached
1155 the same block on two paths. In this case the same value reaches
1156 this block on both paths, there is no definition in between. We need
1157 not allocate a Phi where these path's merge, but we have to communicate
1158 this fact to the caller. This happens by returning a pointer to the
1159 node the caller _will_ allocate. (Yes, we predict the address. We can
1160 do so because the nodes are allocated on the obstack.) The caller then
1161 finds a pointer to itself and, when this routine is called again,
1164 static INLINE ir_node *
1165 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1166 ir_node **in, int ins)
1169 ir_node *res, *known;
1171 /* allocate a new node on the obstack.
1172 This can return a node to which some of the pointers in the in-array
1174 Attention: the constructor copies the in array, i.e., the later changes
1175 to the array in this routine do not affect the constructed node! If
1176 the in array contains NULLs, there will be missing predecessors in the
1178 Is this a possible internal state of the Phi node generation? */
1179 #if USE_EXPLICIT_PHI_IN_STACK
1180 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1182 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1183 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1185 /* The in-array can contain NULLs. These were returned by
1186 get_r_value_internal if it reached the same block/definition on a
1188 The NULLs are replaced by the node itself to simplify the test in the
1190 for (i=0; i < ins; ++i)
1191 if (in[i] == NULL) in[i] = res;
1193 /* This loop checks whether the Phi has more than one predecessor.
1194 If so, it is a real Phi node and we break the loop. Else the
1195 Phi node merges the same definition on several paths and therefore
1197 for (i=0; i < ins; ++i)
1199 if (in[i]==res || in[i]==known) continue;
1207 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Degenerate Phi: recycle the node (stack variant) or pop it off the
   obstack (it is the topmost allocation -- see comment above). */
1209 #if USE_EXPLICIT_PHI_IN_STACK
1210 free_to_Phi_in_stack(res);
1212 obstack_free (current_ir_graph->obst, res);
1216 res = optimize_node (res);
1217 irn_vrfy_irg (res, irg);
1220 /* return the pointer to the Phi node. This node might be deallocated! */
1225 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1228 allocates and returns this node. The routine called to allocate the
1229 node might optimize it away and return a real value, or even a pointer
1230 to a deallocated Phi node on top of the obstack!
1231 This function is called with an in-array of proper size. **/
/* Collects the value of local variable 'pos' from every predecessor block
   into nin[] and builds the merging Phi (or the single reaching value). */
1233 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1235 ir_node *prevBlock, *res;
1238 /* This loop goes to all predecessor blocks of the block the Phi node is in
1239 and there finds the operands of the Phi node by calling
1240 get_r_value_internal. */
/* block->in[0] is the block's own slot; control-flow predecessors are
   in[1..ins], hence the 1-based loop. */
1241 for (i = 1; i <= ins; ++i) {
1242 assert (block->in[i]);
1243 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1245 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1248 /* After collecting all predecessors into the array nin a new Phi node
1249 with these predecessors is created. This constructor contains an
1250 optimization: If all predecessors of the Phi node are identical it
1251 returns the only operand instead of a new Phi node. If the value
1252 passes two different control flow edges without being defined, and
1253 this is the second path treated, a pointer to the node that will be
1254 allocated for the first path (recursion) is returned. We already
1255 know the address of this node, as it is the next node to be allocated
1256 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1257 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1259 /* Now we now the value for "pos" and can enter it in the array with
1260 all known local variables. Attention: this might be a pointer to
1261 a node, that later will be allocated!!! See new_rd_Phi_in.
1262 If this is called in mature, after some set_value in the same block,
1263 the proper value must not be overwritten:
1265 get_value (makes Phi0, put's it into graph_arr)
1266 set_value (overwrites Phi0 in graph_arr)
1267 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1270 if (!block->attr.block.graph_arr[pos]) {
1271 block->attr.block.graph_arr[pos] = res;
1273 /* printf(" value already computed by %s\n",
1274 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1280 /* This function returns the last definition of a variable. In case
1281 this variable was last defined in a previous block, Phi nodes are
1282 inserted. If the part of the firm graph containing the definition
1283 is not yet constructed, a dummy Phi node is returned. */
1285 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1288 /* There are 4 cases to treat.
1290 1. The block is not mature and we visit it the first time. We can not
1291 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1292 predecessors is returned. This node is added to the linked list (field
1293 "link") of the containing block to be completed when this block is
1294 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1297 2. The value is already known in this block, graph_arr[pos] is set and we
1298 visit the block the first time. We can return the value without
1299 creating any new nodes.
1301 3. The block is mature and we visit it the first time. A Phi node needs
1302 to be created (phi_merge). If the Phi is not needed, as all its
1303 operands are the same value reaching the block through different
1304 paths, it's optimized away and the value itself is returned.
1306 4. The block is mature, and we visit it the second time. Now two
1307 subcases are possible:
1308 * The value was computed completely the last time we were here. This
1309 is the case if there is no loop. We can return the proper value.
1310 * The recursion that visited this node and set the flag did not
1311 return yet. We are computing a value in a loop and need to
1312 break the recursion without knowing the result yet.
1313 @@@ strange case. Straight forward we would create a Phi before
1314 starting the computation of its predecessors. In this case we will
1315 find a Phi here in any case. The problem is that this implementation
1316 only creates a Phi after computing the predecessors, so that it is
1317 hard to compute self references of this Phi. @@@
1318 There is no simple check for the second subcase. Therefore we check
1319 for a second visit and treat all such cases as the second subcase.
1320 Anyways, the basic situation is the same: we reached a block
1321 on two paths without finding a definition of the value: No Phi
1322 nodes are needed on both paths.
1323 We return this information "Two paths, no Phi needed" by a very tricky
1324 implementation that relies on the fact that an obstack is a stack and
1325 will return a node with the same address on different allocations.
1326 Look also at phi_merge and new_rd_phi_in to understand this.
1327 @@@ Unfortunately this does not work, see testprogram
1328 three_cfpred_example.
1332 /* case 4 -- already visited. NULL signals "two paths, no Phi needed"
    to the caller (see the long comment above). */
1333 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1335 /* visited the first time */
1336 set_irn_visited(block, get_irg_visited(current_ir_graph));
1338 /* Get the local valid value */
1339 res = block->attr.block.graph_arr[pos];
1341 /* case 2 -- If the value is actually computed, return it. */
1342 if (res) { return res;};
1344 if (block->attr.block.matured) { /* case 3 */
1346 /* The Phi has the same amount of ins as the corresponding block. */
1347 int ins = get_irn_arity(block);
1349 NEW_ARR_A (ir_node *, nin, ins);
1351 /* Phi merge collects the predecessors and then creates a node. */
1352 res = phi_merge (block, pos, mode, nin, ins);
1354 } else { /* case 1 */
1355 /* The block is not mature, we don't know how many in's are needed. A Phi
1356 with zero predecessors is created. Such a Phi node is called Phi0
1357 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1358 to the list of Phi0 nodes in this block to be matured by mature_block
1360 The Phi0 has to remember the pos of its internal value. If the real
1361 Phi is computed, pos is used to update the array with the local
1364 res = new_rd_Phi0 (current_ir_graph, block, mode);
1365 res->attr.phi0_pos = pos;
1366 res->link = block->link;
1370 /* If we get here, the frontend missed a use-before-definition error */
1373 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1374 assert (mode->code >= irm_F && mode->code <= irm_P);
1375 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1376 tarval_mode_null[mode->code]);
1379 /* The local valid value is available now. */
1380 block->attr.block.graph_arr[pos] = res;
1388 it starts the recursion. This causes an Id at the entry of
1389 every block that has no definition of the value! **/
1391 #if USE_EXPLICIT_PHI_IN_STACK
1392 /* Dummy in-stack routines: with this configuration no explicit Phi-in
    stack is maintained, so allocation/free are no-ops. */
1393 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1394 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1397 static INLINE ir_node *
1398 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1399 ir_node **in, int ins)
1402 ir_node *res, *known;
1404 /* Allocate a new node on the obstack. The allocation copies the in
1406 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1407 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1409 /* This loop checks whether the Phi has more than one predecessor.
1410 If so, it is a real Phi node and we break the loop. Else the
1411 Phi node merges the same definition on several paths and therefore
1412 is not needed. Don't consider Bad nodes! */
1414 for (i=0; i < ins; ++i)
1418 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1426 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1429 obstack_free (current_ir_graph->obst, res);
1432 /* An undefined value, e.g., in unreachable code. */
1436 res = optimize_node (res);
1437 irn_vrfy_irg (res, irg);
1438 /* Memory Phis in endless loops must be kept alive.
1439 As we can't distinguish these easily we keep all of them alive. */
1440 if ((res->op == op_Phi) && (mode == mode_M))
1441 add_End_keepalive(irg->end, res);
1448 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1450 #if PRECISE_EXC_CONTEXT
1452 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1454 /* Creates a frag array for fragile op n: a snapshot of the current
   block's graph_arr, with the memory state replaced by a fresh Proj
   on n. Slot n_loc-1 links back to n (see set_frag_value). */
1454 static INLINE ir_node **
1455 new_frag_arr (ir_node *n) {
1458 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1459 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1460 sizeof(ir_node *)*current_ir_graph->n_loc);
1461 /* turn off optimization before allocating Proj nodes, as res isn't
1463 opt = get_optimize(); set_optimize(0);
1464 /* Here we rely on the fact that all frag ops have Memory as first result! */
1465 if (get_irn_op(n) == op_Call)
1466 arr[0] = new_Proj(n, mode_M, 3);
1468 arr[0] = new_Proj(n, mode_M, 0);
1470 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1474 /* Returns the frag array of a fragile op. The array is stored in
   op-specific attribute slots for Call and Alloc, generically otherwise. */
1474 static INLINE ir_node **
1475 get_frag_arr (ir_node *n) {
1476 if (get_irn_op(n) == op_Call) {
1477 return n->attr.call.frag_arr;
1478 } else if (get_irn_op(n) == op_Alloc) {
1479 return n->attr.a.frag_arr;
1481 return n->attr.frag_arr;
1485 /* Enters val at pos in frag_arr if the slot is still empty (never
   overwrites), then recurses into the frag array of the fragile op
   linked at slot n_loc-1, if any. */
1486 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1487 if (!frag_arr[pos]) frag_arr[pos] = val;
1488 if (frag_arr[current_ir_graph->n_loc - 1])
1489 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
1492 /* Returns the definition of value pos as seen right after the fragile
   op cfOp, building Phi/Phi0 nodes in block where necessary. */
1493 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1497 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1499 frag_arr = get_frag_arr(cfOp);
1500 res = frag_arr[pos];
1502 if (block->attr.block.graph_arr[pos]) {
1503 /* There was a set_value after the cfOp and no get_value before that
1504 set_value. We must build a Phi node now. */
1505 if (block->attr.block.matured) {
1506 int ins = get_irn_arity(block);
1508 NEW_ARR_A (ir_node *, nin, ins);
1509 res = phi_merge(block, pos, mode, nin, ins);
1511 res = new_rd_Phi0 (current_ir_graph, block, mode);
1512 res->attr.phi0_pos = pos;
1513 res->link = block->link;
1517 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1518 but this should be better: (remove comment if this works) */
1519 /* It's a Phi, we can write this into all graph_arrs with NULL */
1520 set_frag_value(block->attr.block.graph_arr, pos, res);
1522 res = get_r_value_internal(block, pos, mode);
1523 set_frag_value(block->attr.block.graph_arr, pos, res);
1531 computes the predecessors for the real phi node, and then
1532 allocates and returns this node. The routine called to allocate the
1533 node might optimize it away and return a real value.
1534 This function must be called with an in-array of proper size. **/
1536 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1538 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1541 /* If this block has no value at pos create a Phi0 and remember it
1542 in graph_arr to break recursions.
1543 Else we may not set graph_arr as there a later value is remembered. */
1545 if (!block->attr.block.graph_arr[pos]) {
1546 if (block == get_irg_start_block(current_ir_graph)) {
1547 /* Collapsing to Bad tarvals is no good idea.
1548 So we call a user-supplied routine here that deals with this case as
1549 appropriate for the given language. Unfortunately the only help we can give
1550 here is the position.
1552 Even if all variables are defined before use, it can happen that
1553 we get to the start block, if a cond has been replaced by a tuple
1554 (bad, jmp). In this case we call the function needlessly, eventually
1555 generating a non-existent error.
1556 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1559 if (default_initialize_local_variable)
1560 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1562 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1563 /* We don't need to care about exception ops in the start block.
1564 There are none by definition. */
1565 return block->attr.block.graph_arr[pos];
1567 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1568 block->attr.block.graph_arr[pos] = phi0;
1569 #if PRECISE_EXC_CONTEXT
1570 /* Set graph_arr for fragile ops. Also here we should break recursion.
1571 We could choose a cyclic path through a cfop. But the recursion would
1572 break at some point. */
1573 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1578 /* This loop goes to all predecessor blocks of the block the Phi node
1579 is in and there finds the operands of the Phi node by calling
1580 get_r_value_internal. */
1581 for (i = 1; i <= ins; ++i) {
1582 prevCfOp = skip_Proj(block->in[i]);
1584 if (is_Bad(prevCfOp)) {
1585 /* In case a Cond has been optimized we would get right to the start block
1586 with an invalid definition. */
1587 nin[i-1] = new_Bad();
1590 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1592 if (!is_Bad(prevBlock)) {
1593 #if PRECISE_EXC_CONTEXT
1594 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1595 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1596 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1599 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1601 nin[i-1] = new_Bad();
1605 /* After collecting all predecessors into the array nin a new Phi node
1606 with these predecessors is created. This constructor contains an
1607 optimization: If all predecessors of the Phi node are identical it
1608 returns the only operand instead of a new Phi node. */
1609 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1611 /* In case we allocated a Phi0 node at the beginning of this procedure,
1612 we need to exchange this Phi0 with the real Phi. */
1614 exchange(phi0, res);
1615 block->attr.block.graph_arr[pos] = res;
1616 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1617 only an optimization. */
1623 /* This function returns the last definition of a variable. In case
1624 this variable was last defined in a previous block, Phi nodes are
1625 inserted. If the part of the firm graph containing the definition
1626 is not yet constructed, a dummy Phi node is returned. */
1628 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1631 /* There are 4 cases to treat.
1633 1. The block is not mature and we visit it the first time. We can not
1634 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1635 predecessors is returned. This node is added to the linked list (field
1636 "link") of the containing block to be completed when this block is
1637 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1640 2. The value is already known in this block, graph_arr[pos] is set and we
1641 visit the block the first time. We can return the value without
1642 creating any new nodes.
1644 3. The block is mature and we visit it the first time. A Phi node needs
1645 to be created (phi_merge). If the Phi is not needed, as all its
1646 operands are the same value reaching the block through different
1647 paths, it's optimized away and the value itself is returned.
1649 4. The block is mature, and we visit it the second time. Now two
1650 subcases are possible:
1651 * The value was computed completely the last time we were here. This
1652 is the case if there is no loop. We can return the proper value.
1653 * The recursion that visited this node and set the flag did not
1654 return yet. We are computing a value in a loop and need to
1655 break the recursion. This case only happens if we visited
1656 the same block with phi_merge before, which inserted a Phi0.
1657 So we return the Phi0.
1660 /* case 4 -- already visited. */
1661 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1662 /* As phi_merge allocates a Phi0 this value is always defined. Here
1663 is the critical difference of the two algorithms. */
1664 assert(block->attr.block.graph_arr[pos]);
1665 return block->attr.block.graph_arr[pos];
1668 /* visited the first time */
1669 set_irn_visited(block, get_irg_visited(current_ir_graph));
1671 /* Get the local valid value */
1672 res = block->attr.block.graph_arr[pos];
1674 /* case 2 -- If the value is actually computed, return it. */
1675 if (res) { return res; };
1677 if (block->attr.block.matured) { /* case 3 */
1679 /* The Phi has the same amount of ins as the corresponding block. */
1680 int ins = get_irn_arity(block);
1682 NEW_ARR_A (ir_node *, nin, ins);
1684 /* Phi merge collects the predecessors and then creates a node. */
1685 res = phi_merge (block, pos, mode, nin, ins);
1687 } else { /* case 1 */
1688 /* The block is not mature, we don't know how many in's are needed. A Phi
1689 with zero predecessors is created. Such a Phi node is called Phi0
1690 node. The Phi0 is then added to the list of Phi0 nodes in this block
1691 to be matured by mature_block later.
1692 The Phi0 has to remember the pos of its internal value. If the real
1693 Phi is computed, pos is used to update the array with the local
1695 res = new_rd_Phi0 (current_ir_graph, block, mode);
1696 res->attr.phi0_pos = pos;
1697 res->link = block->link;
1701 /* If we get here, the frontend missed a use-before-definition error */
1704 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1705 assert (mode->code >= irm_F && mode->code <= irm_P);
1706 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1707 get_mode_null(mode));
1710 /* The local valid value is available now. */
1711 block->attr.block.graph_arr[pos] = res;
1716 #endif /* USE_FAST_PHI_CONSTRUCTION */
1718 /* ************************************************************************** */
1720 /** Finalize a Block node, when all control flows are known. */
1721 /** Acceptable parameters are only Block nodes.  Upgrades all Phi0
    nodes queued on block->link to real Phis via phi_merge, then marks
    the block matured and optimizes it in place. */
1723 mature_block (ir_node *block)
1730 assert (get_irn_opcode(block) == iro_Block);
1731 /* @@@ should be commented in
1732 assert (!get_Block_matured(block) && "Block already matured"); */
1734 if (!get_Block_matured(block)) {
1735 ins = ARR_LEN (block->in)-1;
1736 /* Fix block parameters */
1737 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1739 /* An array for building the Phi nodes. */
1740 NEW_ARR_A (ir_node *, nin, ins);
1742 /* Traverse a chain of Phi nodes attached to this block and mature
1744 for (n = block->link; n; n=next) {
1745 inc_irg_visited(current_ir_graph);
1747 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1750 block->attr.block.matured = 1;
1752 /* Now, as the block is a finished firm node, we can optimize it.
1753 Since other nodes have been allocated since the block was created
1754 we can not free the node on the obstack. Therefore we have to call
1756 Unfortunately the optimization does not change a lot, as all allocated
1757 nodes refer to the unoptimized node.
1758 We can call _2, as global cse has no effect on blocks. */
1759 block = optimize_in_place_2(block);
1760 irn_vrfy_irg(block, current_ir_graph);
/* ** new_d_* constructors: allocate a node with debug info in the
   current block of current_ir_graph by delegating to the matching
   new_rd_* constructor.  Fragile ops (Quot, DivMod, Div, Mod, Call,
   Load, Store, Alloc) additionally get a frag array while the graph
   is in phase_building, for PRECISE_EXC_CONTEXT. */
1765 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1767 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1772 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1774 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1779 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1781 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1786 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1788 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1793 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1796 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1797 arg->attr.c.kind = fragmentary;
1798 arg->attr.c.default_proj = max_proj;
1799 res = new_Proj (arg, mode_X, max_proj);
1804 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1806 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1811 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1813 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1817 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1819 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1824 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1826 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1831 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1833 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1839 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1841 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1846 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1848 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1853 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1856 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1858 #if PRECISE_EXC_CONTEXT
1859 if ((current_ir_graph->phase_state == phase_building) &&
1860 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1861 res->attr.frag_arr = new_frag_arr(res);
1868 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1871 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1873 #if PRECISE_EXC_CONTEXT
1874 if ((current_ir_graph->phase_state == phase_building) &&
1875 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1876 res->attr.frag_arr = new_frag_arr(res);
1883 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1886 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1888 #if PRECISE_EXC_CONTEXT
1889 if ((current_ir_graph->phase_state == phase_building) &&
1890 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1891 res->attr.frag_arr = new_frag_arr(res);
1898 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1901 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1903 #if PRECISE_EXC_CONTEXT
1904 if ((current_ir_graph->phase_state == phase_building) &&
1905 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1906 res->attr.frag_arr = new_frag_arr(res);
1913 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1915 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1920 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1922 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1927 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1929 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1934 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1936 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1941 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1943 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1948 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1950 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1955 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1957 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1962 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1964 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1969 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1971 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1976 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1978 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1983 new_d_Jmp (dbg_info* db)
1985 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1989 new_d_Cond (dbg_info* db, ir_node *c)
1991 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1995 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1999 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2000 store, callee, arity, in, tp);
2001 #if PRECISE_EXC_CONTEXT
2002 if ((current_ir_graph->phase_state == phase_building) &&
2003 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
2004 res->attr.call.frag_arr = new_frag_arr(res);
2011 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2013 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2018 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2020 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2025 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2028 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2030 #if PRECISE_EXC_CONTEXT
2031 if ((current_ir_graph->phase_state == phase_building) &&
2032 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2033 res->attr.frag_arr = new_frag_arr(res);
2040 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2043 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2045 #if PRECISE_EXC_CONTEXT
2046 if ((current_ir_graph->phase_state == phase_building) &&
2047 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2048 res->attr.frag_arr = new_frag_arr(res);
2055 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2059 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2060 store, size, alloc_type, where);
2061 #if PRECISE_EXC_CONTEXT
2062 if ((current_ir_graph->phase_state == phase_building) &&
2063 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2064 res->attr.a.frag_arr = new_frag_arr(res);
2071 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2073 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2074 store, ptr, size, free_type);
2078 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2079 /* GL: objptr was called frame before. Frame was a bad choice for the name
2080 as the operand could as well be a pointer to a dynamic object. */
2082 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2083 store, objptr, 0, NULL, ent);
2087 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2089 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2090 store, objptr, n_index, index, sel);
2094 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2096 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2097 store, objptr, ent));
2101 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2103 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2108 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2110 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2118 return current_ir_graph->bad;
2122 new_d_Unknown (void)
2124 return current_ir_graph->unknown;
2128 new_d_CallBegin (dbg_info *db, ir_node *call)
2131 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2136 new_d_EndReg (dbg_info *db)
2139 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2144 new_d_EndExcept (dbg_info *db)
2147 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2152 new_d_Break (dbg_info *db)
2154 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2158 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2160 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2164 /* ********************************************************************* */
2165 /* Comfortable interface with automatic Phi node construction. */
2166 /* (Uses also constructors of ?? interface, except new_Block. */
2167 /* ********************************************************************* */
2169 /** Block construction **/
2170 /* immature Block without predecessors */
2171 ir_node *new_d_immBlock (dbg_info* db) {
2174 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2175 /* creates a new dynamic in-array as length of in is -1 */
2176 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2177 current_ir_graph->current_block = res;
2178 res->attr.block.matured = 0;
2179 res->attr.block.exc = exc_normal;
2180 res->attr.block.handler_entry = 0;
2181 res->attr.block.backedge = NULL;
2182 res->attr.block.in_cg = NULL;
2183 res->attr.block.cg_backedge = NULL;
2184 set_Block_block_visited(res, 0);
2186 /* Create and initialize array for Phi-node construction. */
2187 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2188 current_ir_graph->n_loc);
2189 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2191 /* Immature block may not be optimized! */
2192 irn_vrfy_irg (res, current_ir_graph);
2199 return new_d_immBlock(NULL);
2202 /* add an edge to a jmp/control flow node */
2204 add_in_edge (ir_node *block, ir_node *jmp)
2206 if (block->attr.block.matured) {
2207 assert(0 && "Error: Block already matured!\n");
2210 assert (jmp != NULL);
2211 ARR_APP1 (ir_node *, block->in, jmp);
2215 /* changing the current block */
2217 switch_block (ir_node *target)
2219 current_ir_graph->current_block = target;
2222 /* ************************ */
2223 /* parameter administration */
2225 /* get a value from the parameter array from the current block by its index */
2227 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2229 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2230 inc_irg_visited(current_ir_graph);
2231 /* slot 0 holds the store; user values are shifted by one */
2232 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2234 /* get a value from the parameter array from the current block by its index */
2236 get_value (int pos, ir_mode *mode)
2238 return get_d_value(NULL, pos, mode);
2241 /* set a value at position pos in the parameter array from the current block */
2243 set_value (int pos, ir_node *value)
2245 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2246 assert(pos+1 < current_ir_graph->n_loc);
2247 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2250 /* get the current store */
2254 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2255 /* GL: one could call get_value instead */
2256 inc_irg_visited(current_ir_graph);
2257 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2260 /* set the current store */
2262 set_store (ir_node *store)
2264 /* GL: one could call set_value instead */
2265 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2266 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2270 keep_alive (ir_node *ka)
2272 add_End_keepalive(current_ir_graph->end, ka);
2275 /** Useful access routines **/
2276 /* Returns the current block of the current graph. To set the current
2277 block use switch_block(). */
2278 ir_node *get_cur_block() {
2279 return get_irg_current_block(current_ir_graph);
2282 /* Returns the frame type of the current graph */
2283 type *get_cur_frame_type() {
2284 return get_irg_frame_type(current_ir_graph);
2288 /* ********************************************************************* */
2291 /* call once for each run of the library */
2292 /* Registers the language-dependent callback used to initialize a local
   variable that is read before being written (see phi_merge). */
2293 init_cons (default_initialize_local_variable_func_t *func)
2295 default_initialize_local_variable = func;
2298 /* call for each graph */
2299 /* Marks the graph as completely constructed. */
2300 finalize_cons (ir_graph *irg) {
2301 irg->phase_state = phase_high;
2304 /* ** Convenience constructors without debug info: each simply forwards
   to the matching new_d_* constructor with a NULL dbg_info. */
2305 ir_node *new_Block(int arity, ir_node **in) {
2306 return new_d_Block(NULL, arity, in);
2308 ir_node *new_Start (void) {
2309 return new_d_Start(NULL);
2311 ir_node *new_End (void) {
2312 return new_d_End(NULL);
2314 ir_node *new_Jmp (void) {
2315 return new_d_Jmp(NULL);
2317 ir_node *new_Cond (ir_node *c) {
2318 return new_d_Cond(NULL, c);
2320 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2321 return new_d_Return(NULL, store, arity, in);
2323 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2324 return new_d_Raise(NULL, store, obj);
2326 ir_node *new_Const (ir_mode *mode, tarval *con) {
2327 return new_d_Const(NULL, mode, con);
2329 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2330 return new_d_SymConst(NULL, value, kind);
2332 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2333 return new_d_simpleSel(NULL, store, objptr, ent);
2335 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2337 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2339 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2340 return (new_d_InstOf (NULL, store, objptr, ent));
2342 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2344 return new_d_Call(NULL, store, callee, arity, in, tp);
2346 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2347 return new_d_Add(NULL, op1, op2, mode);
2349 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2350 return new_d_Sub(NULL, op1, op2, mode);
2352 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2353 return new_d_Minus(NULL, op, mode);
2355 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2356 return new_d_Mul(NULL, op1, op2, mode);
2358 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2359 return new_d_Quot(NULL, memop, op1, op2);
2361 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2362 return new_d_DivMod(NULL, memop, op1, op2);
2364 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2365 return new_d_Div(NULL, memop, op1, op2);
2367 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2368 return new_d_Mod(NULL, memop, op1, op2);
2370 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2371 return new_d_Abs(NULL, op, mode);
2373 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2374 return new_d_And(NULL, op1, op2, mode);
2376 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2377 return new_d_Or(NULL, op1, op2, mode);
2379 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2380 return new_d_Eor(NULL, op1, op2, mode);
2382 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2383 return new_d_Not(NULL, op, mode);
2385 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2386 return new_d_Shl(NULL, op, k, mode);
2388 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2389 return new_d_Shr(NULL, op, k, mode);
2391 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2392 return new_d_Shrs(NULL, op, k, mode);
2394 #define new_Rotate new_Rot
2395 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2396 return new_d_Rot(NULL, op, k, mode);
2398 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2399 return new_d_Cmp(NULL, op1, op2);
2401 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2402 return new_d_Conv(NULL, op, mode);
2404 ir_node *new_Cast (ir_node *op, type *to_tp) {
2405 return new_d_Cast(NULL, op, to_tp);
2407 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2408 return new_d_Phi(NULL, arity, in, mode);
2410 ir_node *new_Load (ir_node *store, ir_node *addr) {
2411 return new_d_Load(NULL, store, addr);
2413 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2414 return new_d_Store(NULL, store, addr, val);
2416 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2417 where_alloc where) {
2418 return new_d_Alloc(NULL, store, size, alloc_type, where);
2420 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2422 return new_d_Free(NULL, store, ptr, size, free_type);
2424 ir_node *new_Sync (int arity, ir_node **in) {
2425 return new_d_Sync(NULL, arity, in);
2427 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2428 return new_d_Proj(NULL, arg, mode, proj);
2430 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2431 return new_d_defaultProj(NULL, arg, max_proj);
2433 ir_node *new_Tuple (int arity, ir_node **in) {
2434 return new_d_Tuple(NULL, arity, in);
2436 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2437 return new_d_Id(NULL, val, mode);
2439 ir_node *new_Bad (void) {
2442 ir_node *new_Unknown(void) {
2443 return new_d_Unknown();
2445 ir_node *new_CallBegin (ir_node *callee) {
2446 return new_d_CallBegin(NULL, callee);
2448 ir_node *new_EndReg (void) {
2449 return new_d_EndReg(NULL);
2451 ir_node *new_EndExcept (void) {
2452 return new_d_EndExcept(NULL);
2454 ir_node *new_Break (void) {
2455 return new_d_Break(NULL);
2457 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2458 return new_d_Filter(NULL, arg, mode, proj);