1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
43  * language dependent initialization variable
45 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
47 /*** ******************************************** */
48 /** private interfaces, for professional use only */
50 /* Constructs a Block with a fixed number of predecessors.
51 Does not set current_block. Can not be used with automatic
52 Phi node construction. */
/* Raw Block constructor: arity fixed up front, all control-flow
   predecessors supplied in `in`.  Does NOT touch current_block and is
   therefore unusable with the automatic Phi construction (see header
   comment above).  NOTE(review): the return-type line, braces, local
   declaration of `res` and the `return res;` are on lines elided from
   this view — TODO confirm against the full file. */
new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Blocks carry no block pointer themselves, hence the NULL. */
res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A raw block is born mature: all predecessors are already known. */
set_Block_matured(res, 1);
set_Block_block_visited(res, 0);
/* Initialize exception handling and interprocedural-view attributes. */
res->attr.block.exc = exc_normal;
res->attr.block.handler_entry = 0;
res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
res->attr.block.in_cg = NULL;
res->attr.block.cg_backedge = NULL;
irn_vrfy_irg (res, irg);
/* Raw Start node: zero-input tuple (mode_T) anchoring the initial
   memory, frame pointer and arguments of the graph.  NOTE(review):
   braces and `return res;` are on elided lines. */
new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
irn_vrfy_irg (res, irg);
/* Raw End node: mode_X with dynamic arity (-1); predecessors (the
   Return/Raise nodes and keep-alive edges) are added later. */
new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
irn_vrfy_irg (res, irg);
94 /* Creates a Phi node with all predecessors. Calling this constructor
95 is only allowed if the corresponding block is mature. */
/* Raw Phi constructor with the full predecessor array.  Only legal on
   a mature block whose arity matches the Phi's arity (asserted below).
   NOTE(review): declarations of `res`/`i` and the `return` are on
   elided lines. */
new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
bool has_unknown = false;
assert( get_Block_matured(block) );
assert( get_irn_arity(block) == arity );
res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Skip local optimization if any operand is still Unknown: the Phi
   must survive until those operands are resolved. */
for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
if (!has_unknown) res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
if ((res->op == op_Phi) && (mode == mode_M))
add_End_keepalive(irg->end, res);
/* Raw Const: a zero-input leaf carrying tarval `con`.  NOTE(review):
   the line storing `con` into the node's attributes is elided here,
   as are braces and returns — confirm against the full file. */
new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
res = local_optimize_newby (res);
/* Raw Id: forwards `val` unchanged; normally folded away by
   optimize_node below. */
new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
ir_node *in[1] = {val};
res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Proj: selects result number `proj` from the tuple node `arg`.
   NOTE(review): the `long proj` parameter line is elided from view. */
new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
ir_node *in[1] = {arg};
res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
res->attr.proj = proj;
assert(get_Proj_pred(res));
assert(get_nodes_Block(get_Proj_pred(res)));
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw default Proj for a Cond used as a switch: marks the Cond as
   fragmentary, records `max_proj` as its default projection and
   emits the mode_X Proj for it. */
new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
arg->attr.c.kind = fragmentary;
arg->attr.c.default_proj = max_proj;
res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Raw Conv: converts `op` to `mode`. */
new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
ir_node *in[1] = {op};
res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Tuple: groups `arity` values into one mode_T node, typically
   consumed by Proj nodes. */
new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* ------------------------------------------------------------------
   Raw constructors for the arithmetic, logic, shift and compare
   operations.  All follow the same template: build the operand array
   on the C stack, allocate the node with new_ir_node, run local
   optimization (which may return an equivalent pre-existing node),
   then verify.  The memory-consuming ops (Quot/DivMod/Div/Mod) take a
   memory operand and produce a mode_T tuple; Cmp produces a mode_T
   tuple of relation results.  NOTE(review): for every function here
   the return-type line, braces, `ir_node *res;` and `return res;`
   sit on lines elided from this view.
   ------------------------------------------------------------------ */
/* Add: op1 + op2. */
new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Sub: op1 - op2. */
new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Minus: arithmetic negation of op. */
new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode)
ir_node *in[1] = {op};
res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Mul: op1 * op2. */
new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Quot: floating division with memory operand; result is a tuple. */
new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2)
ir_node *in[3] = {memop, op1, op2};
res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* DivMod: combined integer division and remainder; tuple result. */
new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2)
ir_node *in[3] = {memop, op1, op2};
res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Div: integer division; tuple result. */
new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2)
ir_node *in[3] = {memop, op1, op2};
res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Mod: integer remainder; tuple result. */
new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2)
ir_node *in[3] = {memop, op1, op2};
res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* And: bitwise conjunction. */
new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_And, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Or: bitwise disjunction. */
new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Eor: bitwise exclusive or. */
new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Not: bitwise complement. */
new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode)
ir_node *in[1] = {op};
res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Shl: shift op left by k bits. */
new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode)
ir_node *in[2] = {op, k};
res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Shr: logical shift right. */
new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode)
ir_node *in[2] = {op, k};
res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Shrs: arithmetic (sign-preserving) shift right. */
new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode)
ir_node *in[2] = {op, k};
res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Rot: rotate op by k bits. */
new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode)
ir_node *in[2] = {op, k};
res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Abs: absolute value. */
new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode)
ir_node *in[1] = {op};
res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Cmp: compares op1 and op2; mode_T tuple projected by relation. */
new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2)
ir_node *in[2] = {op1, op2};
res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Jmp: unconditional control flow out of `block` (mode_X). */
new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Cond: conditional branch / switch on `c`; projections select
   the outgoing control flow.  Starts as a `dense` jump table with
   default projection 0 (see new_rd_defaultProj for the fragmentary
   case). */
new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
ir_node *in[1] = {c};
res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
res->attr.c.kind = dense;
res->attr.c.default_proj = 0;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Call: predecessors are [store, callee, arg0..argN-1]; the two
   fixed slots are prepended before the copied argument array (hence
   r_arity = arity + 2 on an elided line).  `tp` must be a method
   type.  NOTE(review): braces, `res`/`r_in`/`r_arity` declarations
   and the assignments filling r_in[0..1] are on elided lines. */
new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
ir_node *callee, int arity, ir_node **in, type *tp)
NEW_ARR_A (ir_node *, r_in, r_arity);
memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
assert(is_method_type(tp));
set_Call_type(res, tp);
res->attr.call.callee_arr = NULL;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Return: predecessors are [store, res0..resN-1]; one fixed slot
   prepended before the copied result array. */
new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *store, int arity, ir_node **in)
NEW_ARR_A (ir_node *, r_in, r_arity);
memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Raise: throws `obj` given memory state `store`; tuple result. */
new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
ir_node *in[2] = {store, obj};
res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Load: reads through `adr` given memory state `store`;
   mode_T tuple (memory, result, exception control flow). */
new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr)
ir_node *in[2] = {store, adr};
res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Store: writes `val` through `adr`; tuple result. */
new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr, ir_node *val)
ir_node *in[3] = {store, adr, val};
res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Alloc: allocates `size` bytes of `alloc_type` either on the
   stack or heap as selected by `where`; tuple result. */
new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
ir_node *size, type *alloc_type, where_alloc where)
ir_node *in[2] = {store, size};
res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
res->attr.a.where = where;
res->attr.a.type = alloc_type;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Free: releases the object at `ptr` of `size` bytes and static
   type `free_type`. */
new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
ir_node *ptr, ir_node *size, type *free_type)
ir_node *in[3] = {store, ptr, size};
res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
res->attr.f = free_type;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Sel: computes the address of entity `ent` relative to `objptr`,
   with `arity` index expressions for compound/array access.
   Predecessors are [store, objptr, index0..indexN-1]; two fixed slots
   prepended before the copied index array (r_arity set on an elided
   line). */
new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
int arity, ir_node **in, entity *ent)
NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
res->attr.s.ent = ent;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw InstOf: run-time type test of `objptr` against class type
   `ent`.  NOTE(review): this allocates with op_Sel, not an op_InstOf
   opcode — looks suspicious but the opcode table is not visible from
   here; confirm intentional before changing.  Optimization and
   verification are deliberately disabled (commented out) below. */
new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
ir_node *objptr, type *ent)
NEW_ARR_A (ir_node *, r_in, r_arity);
res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
res->attr.io.ent = ent;
/* res = optimize (res);
* irn_vrfy_irg (res, irg); */
/* Raw SymConst: a symbolic constant.  For linkage_ptr_info the value
   is an ident (address info); for type_tag/size it must be a type.
   NOTE(review): the branch choosing `mode` for the two cases and the
   else-keyword lines are elided from this view. */
new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
symconst_kind symkind)
if (symkind == linkage_ptr_info)
res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
res->attr.i.num = symkind;
if (symkind == linkage_ptr_info) {
res->attr.i.tori.ptrinfo = (ident *)value;
assert ( ( (symkind == type_tag)
|| (symkind == size))
&& (is_type(value)));
res->attr.i.tori.typ = (type *)value;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Sync: joins `arity` memory states into one (mode_M). */
new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Accessors for the graph's singleton Bad / Unknown nodes; the bodies
   (presumably returning irg's cached nodes — confirm) are on elided
   lines. */
new_rd_Bad (ir_graph *irg)
new_rd_Unknown (ir_graph *irg)
/* --- Interprocedural-view constructors ----------------------------- */
/* Raw CallBegin: marks the start of an interprocedurally-inlined call;
   its single predecessor is the callee address of `call`. */
new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
ir_node *in[1] = { get_Call_ptr(call) };
res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
res->attr.callbegin.irg = irg;
res->attr.callbegin.call = call;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw EndReg: end node for regular interprocedural control flow;
   dynamic arity, filled in later. */
new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
res->attr.end.irg = irg;
irn_vrfy_irg (res, irg);
/* Raw EndExcept: end node for exceptional interprocedural control
   flow; dynamic arity. */
new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
res->attr.end.irg = irg;
irn_vrfy_irg (res, irg);
/* Raw Break: jump that leaves an interprocedurally-viewed region. */
new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Raw Filter: interprocedural analogue of Proj; selects result `proj`
   of `arg` and additionally carries call-graph predecessor info.
   NOTE(review): the `long proj` parameter line is elided. */
new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
ir_node *in[1] = {arg};
res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
res->attr.filter.proj = proj;
res->attr.filter.in_cg = NULL;
res->attr.filter.backedge = NULL;
assert(get_Proj_pred(res));
assert(get_nodes_Block(get_Proj_pred(res)));
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* ------------------------------------------------------------------
   new_r_* convenience wrappers: identical to the new_rd_* raw
   constructors above but without debug information (dbg_info passed
   as NULL).  Purely mechanical forwarding — see the corresponding
   new_rd_* constructor for each node's semantics.  NOTE(review): the
   closing braces of these one-line wrappers are on elided lines.
   ------------------------------------------------------------------ */
INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
return new_rd_Block(NULL, irg, arity, in);
INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
return new_rd_Start(NULL, irg, block);
INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
return new_rd_End(NULL, irg, block);
INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
return new_rd_Jmp(NULL, irg, block);
INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
return new_rd_Cond(NULL, irg, block, c);
INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
ir_node *store, int arity, ir_node **in) {
return new_rd_Return(NULL, irg, block, store, arity, in);
INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
ir_node *store, ir_node *obj) {
return new_rd_Raise(NULL, irg, block, store, obj);
INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
ir_mode *mode, tarval *con) {
return new_rd_Const(NULL, irg, block, mode, con);
INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
type_or_id_p value, symconst_kind symkind) {
return new_rd_SymConst(NULL, irg, block, value, symkind);
INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
ir_node *objptr, int n_index, ir_node **index,
return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
ir_node *callee, int arity, ir_node **in,
return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic / logic wrappers. */
INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Add(NULL, irg, block, op1, op2, mode);
INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Sub(NULL, irg, block, op1, op2, mode);
INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Minus(NULL, irg, block, op, mode);
INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Mul(NULL, irg, block, op1, op2, mode);
INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Quot(NULL, irg, block, memop, op1, op2);
INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Div(NULL, irg, block, memop, op1, op2);
INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Mod(NULL, irg, block, memop, op1, op2);
INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Abs(NULL, irg, block, op, mode);
INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_And(NULL, irg, block, op1, op2, mode);
INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Or(NULL, irg, block, op1, op2, mode);
INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Eor(NULL, irg, block, op1, op2, mode);
INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Not(NULL, irg, block, op, mode);
INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2) {
return new_rd_Cmp(NULL, irg, block, op1, op2);
INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shl(NULL, irg, block, op, k, mode);
INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shr(NULL, irg, block, op, k, mode);
INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shrs(NULL, irg, block, op, k, mode);
INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Rot(NULL, irg, block, op, k, mode);
INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Conv(NULL, irg, block, op, mode);
INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
ir_node **in, ir_mode *mode) {
return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* Memory / allocation wrappers. */
INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr) {
return new_rd_Load(NULL, irg, block, store, adr);
INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr, ir_node *val) {
return new_rd_Store(NULL, irg, block, store, adr, val);
INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
ir_node *size, type *alloc_type, where_alloc where) {
return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
ir_node *ptr, ir_node *size, type *free_type) {
return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
return new_rd_Sync(NULL, irg, block, arity, in);
INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
ir_mode *mode, long proj) {
return new_rd_Proj(NULL, irg, block, arg, mode, proj);
INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
int arity, ir_node **in) {
return new_rd_Tuple(NULL, irg, block, arity, in );
INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
ir_node *val, ir_mode *mode) {
return new_rd_Id(NULL, irg, block, val, mode);
INLINE ir_node *new_r_Bad (ir_graph *irg) {
return new_rd_Bad(irg);
INLINE ir_node *new_r_Unknown (ir_graph *irg) {
return new_rd_Unknown(irg);
/* Interprocedural-view wrappers. */
INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
return new_rd_CallBegin(NULL, irg, block, callee);
INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
return new_rd_EndReg(NULL, irg, block);
INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
return new_rd_EndExcept(NULL, irg, block);
INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
return new_rd_Break(NULL, irg, block);
INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
ir_mode *mode, long proj) {
return new_rd_Filter(NULL, irg, block, arg, mode, proj);
900 /** ********************/
901 /** public interfaces */
902 /** construction tools */
906 * - create a new Start node in the current block
908 * @return s - pointer to the created Start node
/* new_d_Start: like new_rd_Start but placed in the current block of
   current_ir_graph (the "d" constructors use the ambient construction
   state). */
new_d_Start (dbg_info* db)
res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
op_Start, mode_T, 0, NULL);
res = optimize_node (res);
irn_vrfy_irg (res, current_ir_graph);
/* new_d_End: End node in the current block of current_ir_graph. */
new_d_End (dbg_info* db)
res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
op_End, mode_X, -1, NULL);
res = optimize_node (res);
irn_vrfy_irg (res, current_ir_graph);
937 /* Constructs a Block with a fixed number of predecessors.
938 Does set current_block. Can be used with automatic Phi
939 node construction. */
/* new_d_Block: Block constructor for the automatic-Phi construction
   interface.  Unlike new_rd_Block it allocates and zeroes the
   per-block value array (graph_arr, one slot per local variable) and
   makes the new block the current block.  NOTE(review): declarations
   of `res`/`i` and the `return` are on elided lines. */
new_d_Block (dbg_info* db, int arity, ir_node **in)
bool has_unknown = false;
res = new_rd_Block (db, current_ir_graph, arity, in);
/* Create and initialize array for Phi-node construction. */
res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
current_ir_graph->n_loc);
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Don't optimize while an Unknown predecessor is still unresolved. */
for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
if (!has_unknown) res = optimize_node (res);
current_ir_graph->current_block = res;
irn_vrfy_irg (res, current_ir_graph);
964 /* ***********************************************************************/
965 /* Methods necessary for automatic Phi node creation */
967 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
968 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
969 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
970 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
972 Call Graph: ( A ---> B == A "calls" B)
974 get_value mature_block
982 get_r_value_internal |
986 new_rd_Phi0 new_rd_Phi_in
988 * *************************************************************************** */
990 /* Creates a Phi node with 0 predecessors */
/* Phi0: a Phi with zero predecessors, used as a placeholder while the
   enclosing block is not yet mature; completed (turned into a real
   Phi / Id) when the block matures.  Not optimized here on purpose. */
static INLINE ir_node *
new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
irn_vrfy_irg (res, irg);
1000 /* There are two implementations of the Phi node construction. The first
1001 is faster, but does not work for blocks with more than 2 predecessors.
1002 The second works always but is slower and causes more unnecessary Phi
1004 Select the implementations by the following preprocessor flag set in
1006 #if USE_FAST_PHI_CONSTRUCTION
1008 /* This is a stack used for allocating and deallocating nodes in
1009 new_rd_Phi_in. The original implementation used the obstack
1010 to model this stack, now it is explicit. This reduces side effects.
1012 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate a fresh Phi_in_stack with an initially 1-element flexible
   array.  NOTE(review): the malloc result is used without a NULL
   check on the visible lines — confirm policy in the full file. */
INLINE Phi_in_stack *
new_Phi_in_stack() {
res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
res->stack = NEW_ARR_F (ir_node *, 1);
/* Release the stack's array (and, presumably on an elided line, the
   struct itself). */
free_Phi_in_stack(Phi_in_stack *s) {
DEL_ARR_F(s->stack);
/* Push a retired Phi node onto the current graph's Phi_in_stack for
   later reuse: append if the array is full, otherwise overwrite the
   slot at the current position. */
free_to_Phi_in_stack(ir_node *phi) {
assert(get_irn_opcode(phi) == iro_Phi);
if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
current_ir_graph->Phi_in_stack->pos)
ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
(current_ir_graph->Phi_in_stack->pos)++;
/* Pop a reusable Phi node from the graph's Phi_in_stack, or allocate
   a fresh one if the stack is empty; the popped node is re-armed with
   a new in-array built on the obstack.
   NOTE(review): the allocation call below passes `db`, which is not
   among this function's visible parameters — either `db` is declared
   on an elided line or this should be NULL; verify against the full
   file before building with USE_EXPLICIT_PHI_IN_STACK. */
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
int arity, ir_node **in) {
ir_node **stack = current_ir_graph->Phi_in_stack->stack;
int pos = current_ir_graph->Phi_in_stack->pos;
/* We need to allocate a new node */
res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* reuse the old node and initialize it again. */
assert (res->kind == k_ir_node);
assert (res->op == op_Phi);
assert (arity >= 0);
/* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is the block; copy the operands after it. */
res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
(current_ir_graph->Phi_in_stack->pos)--;
1076 /* Creates a Phi node with a given, fixed array **in of predecessors.
1077 If the Phi node is unnecessary, as the same value reaches the block
1078 through all control flow paths, it is eliminated and the value
1079 returned directly. This constructor is only intended for use in
1080 the automatic Phi node generation triggered by get_value or mature.
1081 The implementation is quite tricky and depends on the fact, that
1082 the nodes are allocated on a stack:
1083 The in array contains predecessors and NULLs. The NULLs appear,
1084 if get_r_value_internal, that computed the predecessors, reached
1085 the same block on two paths. In this case the same value reaches
1086 this block on both paths, there is no definition in between. We need
1087    not allocate a Phi where these paths merge, but we have to communicate
1088 this fact to the caller. This happens by returning a pointer to the
1089 node the caller _will_ allocate. (Yes, we predict the address. We can
1090 do so because the nodes are allocated on the obstack.) The caller then
1091 finds a pointer to itself and, when this routine is called again,
/* Internal Phi constructor for automatic Phi generation — see the
   long comment above for the address-prediction trick it relies on
   (the node is allocated first so its address can stand in for the
   NULL entries in `in`). */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
ir_node **in, int ins)
ir_node *res, *known;
/* allocate a new node on the obstack.
This can return a node to which some of the pointers in the in-array
Attention: the constructor copies the in array, i.e., the later changes
to the array in this routine do not affect the constructed node!  If
the in array contains NULLs, there will be missing predecessors in the
Is this a possible internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
/* The in-array can contain NULLs.  These were returned by
get_r_value_internal if it reached the same block/definition on a
The NULLs are replaced by the node itself to simplify the test in the
*/
for (i=0; i < ins; ++i)
if (in[i] == NULL) in[i] = res;
/* This loop checks whether the Phi has more than one predecessor.
If so, it is a real Phi node and we break the loop.  Else the
Phi node merges the same definition on several paths and therefore
*/
for (i=0; i < ins; ++i)
if (in[i]==res || in[i]==known) continue;
/* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the tentative Phi: either back onto the explicit stack
or by rolling back the obstack top. */
#if USE_EXPLICIT_PHI_IN_STACK
free_to_Phi_in_stack(res);
obstack_free (current_ir_graph->obst, res);
res = optimize_node (res);
irn_vrfy_irg (res, irg);
1155 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1158 allocates and returns this node. The routine called to allocate the
1159 node might optimize it away and return a real value, or even a pointer
1160 to a deallocated Phi node on top of the obstack!
1161 This function is called with an in-array of proper size. **/
/* Collects the Phi operands for variable slot `pos` from all `ins`
   predecessor blocks (via get_r_value_internal), builds the Phi
   through new_rd_Phi_in and records the result in the block's value
   array unless a later set_value already filled that slot.
   NOTE(review): declarations of `i` and the `return res;` are on
   elided lines. */
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
ir_node *prevBlock, *res;
/* This loop goes to all predecessor blocks of the block the Phi node is in
and there finds the operands of the Phi node by calling
get_r_value_internal. */
for (i = 1; i <= ins; ++i) {
assert (block->in[i]);
prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
/* After collecting all predecessors into the array nin a new Phi node
with these predecessors is created.  This constructor contains an
optimization: If all predecessors of the Phi node are identical it
returns the only operand instead of a new Phi node.  If the value
passes two different control flow edges without being defined, and
this is the second path treated, a pointer to the node that will be
allocated for the first path (recursion) is returned.  We already
know the address of this node, as it is the next node to be allocated
and will be placed on top of the obstack. (The obstack is a _stack_!) */
res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
/* Now we know the value for "pos" and can enter it in the array with
all known local variables.  Attention: this might be a pointer to
a node, that later will be allocated!!! See new_rd_Phi_in.
If this is called in mature, after some set_value in the same block,
the proper value must not be overwritten:
get_value (makes Phi0, puts it into graph_arr)
set_value (overwrites Phi0 in graph_arr)
mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
*/
if (!block->attr.block.graph_arr[pos]) {
block->attr.block.graph_arr[pos] = res;
/* printf(" value already computed by %s\n",
id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1210 /* This function returns the last definition of a variable. In case
1211 this variable was last defined in a previous block, Phi nodes are
1212 inserted. If the part of the firm graph containing the definition
1213 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): this is the variant that relies on obstack allocation order
   for case 4 (returns NULL there); see the long case discussion below. */
1215 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1218 /* There are 4 cases to treat.
1220 1. The block is not mature and we visit it the first time. We can not
1221 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1222 predecessors is returned. This node is added to the linked list (field
1223 "link") of the containing block to be completed when this block is
1224 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1227 2. The value is already known in this block, graph_arr[pos] is set and we
1228 visit the block the first time. We can return the value without
1229 creating any new nodes.
1231 3. The block is mature and we visit it the first time. A Phi node needs
1232 to be created (phi_merge). If the Phi is not needed, as all its
1233 operands are the same value reaching the block through different
1234 paths, it's optimized away and the value itself is returned.
1236 4. The block is mature, and we visit it the second time. Now two
1237 subcases are possible:
1238 * The value was computed completely the last time we were here. This
1239 is the case if there is no loop. We can return the proper value.
1240 * The recursion that visited this node and set the flag did not
1241 return yet. We are computing a value in a loop and need to
1242 break the recursion without knowing the result yet.
1243 @@@ strange case. Straight forward we would create a Phi before
1244 starting the computation of its predecessors. In this case we will
1245 find a Phi here in any case. The problem is that this implementation
1246 only creates a Phi after computing the predecessors, so that it is
1247 hard to compute self references of this Phi. @@@
1248 There is no simple check for the second subcase. Therefore we check
1249 for a second visit and treat all such cases as the second subcase.
1250 Anyways, the basic situation is the same: we reached a block
1251 on two paths without finding a definition of the value: No Phi
1252 nodes are needed on both paths.
1253 We return this information "Two paths, no Phi needed" by a very tricky
1254 implementation that relies on the fact that an obstack is a stack and
1255 will return a node with the same address on different allocations.
1256 Look also at phi_merge and new_rd_phi_in to understand this.
1257 @@@ Unfortunately this does not work, see testprogram
1258 three_cfpred_example.
1262 /* case 4 -- already visited. */
1263 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1265 /* visited the first time */
1266 set_irn_visited(block, get_irg_visited(current_ir_graph));
1268 /* Get the local valid value */
1269 res = block->attr.block.graph_arr[pos];
1271 /* case 2 -- If the value is actually computed, return it. */
1272 if (res) { return res;};
1274 if (block->attr.block.matured) { /* case 3 */
1276 /* The Phi has the same amount of ins as the corresponding block. */
1277 int ins = get_irn_arity(block);
1279 NEW_ARR_A (ir_node *, nin, ins);
1281 /* Phi merge collects the predecessors and then creates a node. */
1282 res = phi_merge (block, pos, mode, nin, ins);
1284 } else { /* case 1 */
1285 /* The block is not mature, we don't know how many in's are needed. A Phi
1286 with zero predecessors is created. Such a Phi node is called Phi0
1287 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1288 to the list of Phi0 nodes in this block to be matured by mature_block
1290 The Phi0 has to remember the pos of its internal value. If the real
1291 Phi is computed, pos is used to update the array with the local
1294 res = new_rd_Phi0 (current_ir_graph, block, mode);
1295 res->attr.phi0_pos = pos;
1296 res->link = block->link;
/* Fallback for use before definition: warn and yield a zero constant so
   that graph construction can continue. */
1300 /* If we get here, the frontend missed a use-before-definition error */
1303 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1304 assert (mode->code >= irm_F && mode->code <= irm_P);
1305 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1306 tarval_mode_null[mode->code]);
1309 /* The local valid value is available now. */
1310 block->attr.block.graph_arr[pos] = res;
1318 it starts the recursion. This causes an Id at the entry of
1319 every block that has no definition of the value! **/
1321 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack handling for this Phi construction variant: no explicit
   Phi-in stack is needed here, so allocate/free are no-ops. */
1323 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1324 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Builds a Phi node in block from the ins array. If at most one distinct,
   non-Bad, non-self predecessor remains, no Phi is needed and the operand
   (or Bad for undefined values) is returned instead; the speculatively
   allocated node is then released from the obstack again. */
1327 static INLINE ir_node *
1328 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1329 ir_node **in, int ins)
1332 ir_node *res, *known;
1334 /* Allocate a new node on the obstack. The allocation copies the in
1336 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1337 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1339 /* This loop checks whether the Phi has more than one predecessor.
1340 If so, it is a real Phi node and we break the loop. Else the
1341 Phi node merges the same definition on several paths and therefore
1342 is not needed. Don't consider Bad nodes! */
1344 for (i=0; i < ins; ++i)
1348 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1356 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1359 obstack_free (current_ir_graph->obst, res);
1362 /* An undefined value, e.g., in unreachable code. */
1366 res = optimize_node (res);
1367 irn_vrfy_irg (res, irg);
1368 /* Memory Phis in endless loops must be kept alive.
1369 As we can't distinguish these easily we keep all of them alive. */
1370 if ((res->op == op_Phi) && (mode == mode_M))
1371 add_End_keepalive(irg->end, res);
1378 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1380 #if PRECISE_EXC_CONTEXT
1382 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocates a fragment array for fragile op n: a snapshot of the current
   block's graph_arr (the locally valid values) taken before n may raise an
   exception.  n itself is remembered in the last slot so set_frag_value
   can chain through later fragile ops. */
1384 static INLINE ir_node **
1385 new_frag_arr (ir_node *n) {
1388 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1389 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1390 sizeof(ir_node *)*current_ir_graph->n_loc);
1391 /* turn off optimization before allocating Proj nodes, as res isn't
1393 opt = get_optimize(); set_optimize(0);
1394 /* Here we rely on the fact that all frag ops have Memory as first result! */
1395 if (get_irn_op(n) == op_Call)
1396 arr[0] = new_Proj(n, mode_M, 3);
1398 arr[0] = new_Proj(n, mode_M, 0);
1400 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array stored in the attributes of fragile op n;
   the attribute slot differs by opcode (Call and Alloc have their own
   attribute structs, all other fragile ops use the generic frag_arr). */
1404 static INLINE ir_node **
1405 get_frag_arr (ir_node *n) {
1406 if (get_irn_op(n) == op_Call) {
1407 return n->attr.call.frag_arr;
1408 } else if (get_irn_op(n) == op_Alloc) {
1409 return n->attr.a.frag_arr;
1411 return n->attr.frag_arr;
/* Records val for variable pos in frag_arr unless a value is already set
   (never overwrites).  If the sentinel slot (n_loc-1) names a later fragile
   op, that op's own fragment array is updated recursively as well. */
1416 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1417 if (!frag_arr[pos]) frag_arr[pos] = val;
1418 if (frag_arr[current_ir_graph->n_loc - 1])
1419 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value of variable pos that is valid on the exceptional exit
   of fragile op cfOp.  If a set_value happened after cfOp in block, a Phi
   (or, for immature blocks, a Phi0 chained into block->link) is built so
   the pre-exception value is merged correctly. */
1423 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1427 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1429 frag_arr = get_frag_arr(cfOp);
1430 res = frag_arr[pos];
1432 if (block->attr.block.graph_arr[pos]) {
1433 /* There was a set_value after the cfOp and no get_value before that
1434 set_value. We must build a Phi node now. */
1435 if (block->attr.block.matured) {
1436 int ins = get_irn_arity(block);
1438 NEW_ARR_A (ir_node *, nin, ins);
1439 res = phi_merge(block, pos, mode, nin, ins);
1441 res = new_rd_Phi0 (current_ir_graph, block, mode);
1442 res->attr.phi0_pos = pos;
1443 res->link = block->link;
1447 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1448 but this should be better: (remove comment if this works) */
1449 /* It's a Phi, we can write this into all graph_arrs with NULL */
1450 set_frag_value(block->attr.block.graph_arr, pos, res);
1452 res = get_r_value_internal(block, pos, mode);
1453 set_frag_value(block->attr.block.graph_arr, pos, res);
1461 computes the predecessors for the real phi node, and then
1462 allocates and returns this node. The routine called to allocate the
1463 node might optimize it away and return a real value.
1464 This function must be called with an in-array of proper size. **/
1466 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1468 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1471 /* If this block has no value at pos create a Phi0 and remember it
1472 in graph_arr to break recursions.
1473 Else we may not set graph_arr as a later value is remembered there. */
1475 if (!block->attr.block.graph_arr[pos]) {
1476 if (block == get_irg_start_block(current_ir_graph)) {
1477 /* Collapsing to Bad tarvals is no good idea.
1478 So we call a user-supplied routine here that deals with this case as
1479 appropriate for the given language. Sadly the only help we can give
1480 here is the position.
1482 Even if all variables are defined before use, it can happen that
1483 we get to the start block, if a cond has been replaced by a tuple
1484 (bad, jmp). In this case we call the function needlessly, eventually
1485 generating a non-existent error.
1486 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1489 if (default_initialize_local_variable)
1490 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1492 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1493 /* We don't need to care about exception ops in the start block.
1494 There are none by definition. */
1495 return block->attr.block.graph_arr[pos];
1497 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1498 block->attr.block.graph_arr[pos] = phi0;
1499 #if PRECISE_EXC_CONTEXT
1500 /* Set graph_arr for fragile ops. Also here we should break recursion.
1501 We could choose a cyclic path through an cfop. But the recursion would
1502 break at some point. */
1503 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1508 /* This loop goes to all predecessor blocks of the block the Phi node
1509 is in and there finds the operands of the Phi node by calling
1510 get_r_value_internal. */
/* Block predecessors live at in[1..ins]; the operand array nin is 0-based. */
1511 for (i = 1; i <= ins; ++i) {
1512 prevCfOp = skip_Proj(block->in[i]);
1514 if (is_Bad(prevCfOp)) {
1515 /* In case a Cond has been optimized we would get right to the start block
1516 with an invalid definition. */
1517 nin[i-1] = new_Bad();
1520 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1522 if (!is_Bad(prevBlock)) {
1523 #if PRECISE_EXC_CONTEXT
1524 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): the assert evaluates the same call that is done again on the
   next line; harmless, but redundant work in debug builds. */
1525 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1526 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1529 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1531 nin[i-1] = new_Bad();
1535 /* After collecting all predecessors into the array nin a new Phi node
1536 with these predecessors is created. This constructor contains an
1537 optimization: If all predecessors of the Phi node are identical it
1538 returns the only operand instead of a new Phi node. */
1539 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1541 /* In case we allocated a Phi0 node at the beginning of this procedure,
1542 we need to exchange this Phi0 with the real Phi. */
1544 exchange(phi0, res);
1545 block->attr.block.graph_arr[pos] = res;
1546 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1547 only an optimization. */
/* NOTE(review): this is the simpler Phi0-based variant of the algorithm
   (no obstack address trick); in case 4 it can rely on the Phi0 that
   phi_merge left behind in graph_arr. */
1553 /* This function returns the last definition of a variable. In case
1554 this variable was last defined in a previous block, Phi nodes are
1555 inserted. If the part of the firm graph containing the definition
1556 is not yet constructed, a dummy Phi node is returned. */
1558 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1561 /* There are 4 cases to treat.
1563 1. The block is not mature and we visit it the first time. We can not
1564 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1565 predecessors is returned. This node is added to the linked list (field
1566 "link") of the containing block to be completed when this block is
1567 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1570 2. The value is already known in this block, graph_arr[pos] is set and we
1571 visit the block the first time. We can return the value without
1572 creating any new nodes.
1574 3. The block is mature and we visit it the first time. A Phi node needs
1575 to be created (phi_merge). If the Phi is not needed, as all its
1576 operands are the same value reaching the block through different
1577 paths, it's optimized away and the value itself is returned.
1579 4. The block is mature, and we visit it the second time. Now two
1580 subcases are possible:
1581 * The value was computed completely the last time we were here. This
1582 is the case if there is no loop. We can return the proper value.
1583 * The recursion that visited this node and set the flag did not
1584 return yet. We are computing a value in a loop and need to
1585 break the recursion. This case only happens if we visited
1586 the same block with phi_merge before, which inserted a Phi0.
1587 So we return the Phi0.
1590 /* case 4 -- already visited. */
1591 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1592 /* As phi_merge allocates a Phi0 this value is always defined. Here
1593 is the critical difference of the two algorithms. */
1594 assert(block->attr.block.graph_arr[pos]);
1595 return block->attr.block.graph_arr[pos];
1598 /* visited the first time */
1599 set_irn_visited(block, get_irg_visited(current_ir_graph));
1601 /* Get the local valid value */
1602 res = block->attr.block.graph_arr[pos];
1604 /* case 2 -- If the value is actually computed, return it. */
1605 if (res) { return res; };
1607 if (block->attr.block.matured) { /* case 3 */
1609 /* The Phi has the same amount of ins as the corresponding block. */
1610 int ins = get_irn_arity(block);
1612 NEW_ARR_A (ir_node *, nin, ins);
1614 /* Phi merge collects the predecessors and then creates a node. */
1615 res = phi_merge (block, pos, mode, nin, ins);
1617 } else { /* case 1 */
1618 /* The block is not mature, we don't know how many in's are needed. A Phi
1619 with zero predecessors is created. Such a Phi node is called Phi0
1620 node. The Phi0 is then added to the list of Phi0 nodes in this block
1621 to be matured by mature_block later.
1622 The Phi0 has to remember the pos of its internal value. If the real
1623 Phi is computed, pos is used to update the array with the local
1625 res = new_rd_Phi0 (current_ir_graph, block, mode);
1626 res->attr.phi0_pos = pos;
1627 res->link = block->link;
/* Fallback for use before definition: warn and yield a zero constant so
   that graph construction can continue. */
1631 /* If we get here, the frontend missed a use-before-definition error */
1634 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1635 assert (mode->code >= irm_F && mode->code <= irm_P);
1636 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1637 get_mode_null(mode));
1640 /* The local valid value is available now. */
1641 block->attr.block.graph_arr[pos] = res;
1646 #endif /* USE_FAST_PHI_CONSTRUCTION */
1648 /* ************************************************************************** */
1650 /** Finalize a Block node, when all control flows are known. */
1651 /** Acceptable parameters are only Block nodes. */
/* Marks block as matured: all control flow predecessors are known now.
   Completes all pending Phi0 nodes collected on block->link by replacing
   them with real Phis (phi_merge), then optimizes the finished block
   in place. */
1653 mature_block (ir_node *block)
1660 assert (get_irn_opcode(block) == iro_Block);
1661 /* @@@ should be commented in
1662 assert (!get_Block_matured(block) && "Block already matured"); */
1664 if (!get_Block_matured(block)) {
/* in[0] is not a control flow predecessor slot, hence the -1. */
1665 ins = ARR_LEN (block->in)-1;
1666 /* Fix block parameters */
1667 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1669 /* An array for building the Phi nodes. */
1670 NEW_ARR_A (ir_node *, nin, ins);
1672 /* Traverse a chain of Phi nodes attached to this block and mature
1674 for (n = block->link; n; n=next) {
1675 inc_irg_visited(current_ir_graph);
1677 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1680 block->attr.block.matured = 1;
1682 /* Now, as the block is a finished firm node, we can optimize it.
1683 Since other nodes have been allocated since the block was created
1684 we can not free the node on the obstack. Therefore we have to call
1686 Unfortunately the optimization does not change a lot, as all allocated
1687 nodes refer to the unoptimized node.
1688 We can call _2, as global cse has no effect on blocks. */
1689 block = optimize_in_place_2(block);
1690 irn_vrfy_irg(block, current_ir_graph);
/* Constructors with debug info that build in the graph's current block;
   they forward to the corresponding new_rd_* constructors. */
1695 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1697 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Note: constants are built in the start block, not in the current block. */
1702 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1704 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1709 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1711 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1716 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1718 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond: marks the Cond as fragmentary and records the
   default projection number in its attributes before building the Proj. */
1723 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1726 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1727 arg->attr.c.kind = fragmentary;
1728 arg->attr.c.default_proj = max_proj;
1729 res = new_Proj (arg, mode_X, max_proj);
1734 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1736 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1741 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1743 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1748 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1750 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1755 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1757 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1763 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1765 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1770 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1772 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
/* Division family: these are fragile ops (may raise an exception), so with
   PRECISE_EXC_CONTEXT a fragment array snapshotting the local values is
   attached -- only while building, and only if the node was not optimized
   away into a different op. */
1777 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1780 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1782 #if PRECISE_EXC_CONTEXT
1783 if ((current_ir_graph->phase_state == phase_building) &&
1784 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1785 res->attr.frag_arr = new_frag_arr(res);
1792 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1795 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1797 #if PRECISE_EXC_CONTEXT
1798 if ((current_ir_graph->phase_state == phase_building) &&
1799 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1800 res->attr.frag_arr = new_frag_arr(res);
1807 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1810 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1812 #if PRECISE_EXC_CONTEXT
1813 if ((current_ir_graph->phase_state == phase_building) &&
1814 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1815 res->attr.frag_arr = new_frag_arr(res);
1822 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1825 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1827 #if PRECISE_EXC_CONTEXT
1828 if ((current_ir_graph->phase_state == phase_building) &&
1829 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1830 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise, shift, compare and control flow constructors building in the
   current block; all forward to the corresponding new_rd_* constructor. */
1837 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1839 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1844 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1846 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1851 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1853 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1858 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1860 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1865 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1867 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1872 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1874 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1879 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1881 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1886 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1888 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1893 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1895 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1900 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1902 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1907 new_d_Jmp (dbg_info* db)
1909 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1913 new_d_Cond (dbg_info* db, ir_node *c)
1915 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory/side-effect constructors; Call, Load, Store and Alloc are fragile
   ops and receive a fragment array under PRECISE_EXC_CONTEXT (only while
   building and if the node was not optimized into a different op). */
1919 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1923 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1924 store, callee, arity, in, tp);
1925 #if PRECISE_EXC_CONTEXT
1926 if ((current_ir_graph->phase_state == phase_building) &&
1927 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1928 res->attr.call.frag_arr = new_frag_arr(res);
1935 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1937 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1942 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1944 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1949 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1952 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1954 #if PRECISE_EXC_CONTEXT
1955 if ((current_ir_graph->phase_state == phase_building) &&
1956 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1957 res->attr.frag_arr = new_frag_arr(res);
1964 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1967 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1969 #if PRECISE_EXC_CONTEXT
1970 if ((current_ir_graph->phase_state == phase_building) &&
1971 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1972 res->attr.frag_arr = new_frag_arr(res);
1979 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1983 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1984 store, size, alloc_type, where);
1985 #if PRECISE_EXC_CONTEXT
1986 if ((current_ir_graph->phase_state == phase_building) &&
1987 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1988 res->attr.a.frag_arr = new_frag_arr(res);
1995 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1997 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1998 store, ptr, size, free_type);
/* Entity selection, symbolic constants and interprocedural-view
   constructors; all build in the current block. */
2002 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2003 /* GL: objptr was called frame before. Frame was a bad choice for the name
2004 as the operand could as well be a pointer to a dynamic object. */
2006 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2007 store, objptr, 0, NULL, ent);
2011 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2013 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2014 store, objptr, n_index, index, sel);
2018 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2020 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2021 store, objptr, ent));
2025 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2027 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2032 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2034 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* new_d_Bad / new_d_Unknown return the graph's singleton Bad and Unknown
   nodes instead of constructing new ones. */
2042 return current_ir_graph->bad;
2046 new_d_Unknown (void)
2048 return current_ir_graph->unknown;
2052 new_d_CallBegin (dbg_info *db, ir_node *call)
2055 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2060 new_d_EndReg (dbg_info *db)
2063 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2068 new_d_EndExcept (dbg_info *db)
2071 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2076 new_d_Break (dbg_info *db)
2078 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2082 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2084 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2088 /* ********************************************************************* */
2089 /* Comfortable interface with automatic Phi node construction. */
2090 /* (Uses also constructors of the ?? interface, except new_Block.) */
2091 /* ********************************************************************* */
2093 /** Block construction **/
2094 /* immature Block without predecessors */
/* Creates an immature block (predecessors still unknown, dynamic in-array),
   makes it the current block and sets up the graph_arr used by the
   automatic Phi construction.  Must be matured via mature_block later. */
2095 ir_node *new_d_immBlock (dbg_info* db) {
2098 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2099 /* creates a new dynamic in-array as length of in is -1 */
2100 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2101 current_ir_graph->current_block = res;
2102 res->attr.block.matured = 0;
2103 res->attr.block.exc = exc_normal;
2104 res->attr.block.handler_entry = 0;
2105 res->attr.block.backedge = NULL;
2106 res->attr.block.in_cg = NULL;
2107 res->attr.block.cg_backedge = NULL;
2108 set_Block_block_visited(res, 0);
2110 /* Create and initialize array for Phi-node construction. */
2111 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2112 current_ir_graph->n_loc);
2113 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2115 /* Immature block may not be optimized! */
2116 irn_vrfy_irg (res, current_ir_graph);
/* new_immBlock: convenience wrapper without debug info. */
2123 return new_d_immBlock(NULL);
2126 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control flow predecessor of an immature block. */
2128 add_in_edge (ir_node *block, ir_node *jmp)
2130 if (block->attr.block.matured) {
2131 assert(0 && "Error: Block already matured!\n");
2134 assert (jmp != NULL);
2135 ARR_APP1 (ir_node *, block->in, jmp);
2139 /* changing the current block */
2141 switch_block (ir_node *target)
2143 current_ir_graph->current_block = target;
2146 /* ************************ */
2147 /* parameter administration */
2149 /* get a value from the parameter array from the current block by its index */
2151 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2153 assert(get_irg_phase_state (current_ir_graph) == phase_building)
2154 inc_irg_visited(current_ir_graph);
/* Slot 0 of graph_arr holds the memory state (see get_store/set_store),
   so user variables are shifted by one. */
2156 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2158 /* get a value from the parameter array from the current block by its index */
2160 get_value (int pos, ir_mode *mode)
2162 return get_d_value(NULL, pos, mode);
2165 /* set a value at position pos in the parameter array from the current block */
2167 set_value (int pos, ir_node *value)
2169 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2170 assert(pos+1 < current_ir_graph->n_loc);
2171 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2174 /* get the current store */
2178 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2179 /* GL: one could call get_value instead */
2180 inc_irg_visited(current_ir_graph);
2181 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2184 /* set the current store */
2186 set_store (ir_node *store)
2188 /* GL: one could call set_value instead */
2189 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2190 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Marks ka as a keep-alive edge of the End node so it is not removed as dead. */
2194 keep_alive (ir_node *ka)
2196 add_End_keepalive(current_ir_graph->end, ka);
2199 /** Useful access routines **/
2200 /* Returns the current block of the current graph. To set the current
2201 block use switch_block(). */
/* NOTE(review): consider (void) parameter lists for strict prototypes. */
2202 ir_node *get_cur_block() {
2203 return get_irg_current_block(current_ir_graph);
2206 /* Returns the frame type of the current graph */
2207 type *get_cur_frame_type() {
2208 return get_irg_frame_type(current_ir_graph);
2212 /* ********************************************************************* */
2215 /* call once for each run of the library */
/* Registers the language-dependent callback used to initialize a local
   variable that is read before it is written (see phi_merge). */
2217 init_cons (default_initialize_local_variable_func_t *func)
2219 default_initialize_local_variable = func;
2222 /* call for each graph */
/* Ends the construction phase of irg: later get_value/set_value calls
   on it are invalid (they assert phase_building). */
2224 finalize_cons (ir_graph *irg) {
2225 irg->phase_state = phase_high;
/* Convenience constructors without debug info: each forwards to the
   corresponding new_d_* constructor with db == NULL. */
2229 ir_node *new_Block(int arity, ir_node **in) {
2230 return new_d_Block(NULL, arity, in);
2232 ir_node *new_Start (void) {
2233 return new_d_Start(NULL);
2235 ir_node *new_End (void) {
2236 return new_d_End(NULL);
2238 ir_node *new_Jmp (void) {
2239 return new_d_Jmp(NULL);
2241 ir_node *new_Cond (ir_node *c) {
2242 return new_d_Cond(NULL, c);
2244 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2245 return new_d_Return(NULL, store, arity, in);
2247 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2248 return new_d_Raise(NULL, store, obj);
2250 ir_node *new_Const (ir_mode *mode, tarval *con) {
2251 return new_d_Const(NULL, mode, con);
2253 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2254 return new_d_SymConst(NULL, value, kind);
2256 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2257 return new_d_simpleSel(NULL, store, objptr, ent);
2259 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2261 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2263 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2264 return (new_d_InstOf (NULL, store, objptr, ent));
2266 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2268 return new_d_Call(NULL, store, callee, arity, in, tp);
2270 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2271 return new_d_Add(NULL, op1, op2, mode);
2273 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2274 return new_d_Sub(NULL, op1, op2, mode);
2276 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2277 return new_d_Minus(NULL, op, mode);
2279 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2280 return new_d_Mul(NULL, op1, op2, mode);
2282 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2283 return new_d_Quot(NULL, memop, op1, op2);
2285 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2286 return new_d_DivMod(NULL, memop, op1, op2);
2288 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2289 return new_d_Div(NULL, memop, op1, op2);
2291 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2292 return new_d_Mod(NULL, memop, op1, op2);
/* Continuation of the convenience constructors without debug info
   (db == NULL forwarded to the new_d_* variants). */
2294 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2295 return new_d_Abs(NULL, op, mode);
2297 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2298 return new_d_And(NULL, op1, op2, mode);
2300 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2301 return new_d_Or(NULL, op1, op2, mode);
2303 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2304 return new_d_Eor(NULL, op1, op2, mode);
2306 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2307 return new_d_Not(NULL, op, mode);
2309 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2310 return new_d_Shl(NULL, op, k, mode);
2312 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2313 return new_d_Shr(NULL, op, k, mode);
2315 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2316 return new_d_Shrs(NULL, op, k, mode);
/* Alias -- presumably kept for backward compatibility; TODO confirm. */
2318 #define new_Rotate new_Rot
2319 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2320 return new_d_Rot(NULL, op, k, mode);
2322 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2323 return new_d_Cmp(NULL, op1, op2);
2325 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2326 return new_d_Conv(NULL, op, mode);
2328 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2329 return new_d_Phi(NULL, arity, in, mode);
2331 ir_node *new_Load (ir_node *store, ir_node *addr) {
2332 return new_d_Load(NULL, store, addr);
2334 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2335 return new_d_Store(NULL, store, addr, val);
2337 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2338 where_alloc where) {
2339 return new_d_Alloc(NULL, store, size, alloc_type, where);
2341 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2343 return new_d_Free(NULL, store, ptr, size, free_type);
2345 ir_node *new_Sync (int arity, ir_node **in) {
2346 return new_d_Sync(NULL, arity, in);
2348 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2349 return new_d_Proj(NULL, arg, mode, proj);
2351 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2352 return new_d_defaultProj(NULL, arg, max_proj);
2354 ir_node *new_Tuple (int arity, ir_node **in) {
2355 return new_d_Tuple(NULL, arity, in);
2357 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2358 return new_d_Id(NULL, val, mode);
2360 ir_node *new_Bad (void) {
2363 ir_node *new_Unknown(void) {
2364 return new_d_Unknown();
2366 ir_node *new_CallBegin (ir_node *callee) {
2367 return new_d_CallBegin(NULL, callee);
2369 ir_node *new_EndReg (void) {
2370 return new_d_EndReg(NULL);
2372 ir_node *new_EndExcept (void) {
2373 return new_d_EndExcept(NULL);
2375 ir_node *new_Break (void) {
2376 return new_d_Break(NULL);
2378 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2379 return new_d_Filter(NULL, arg, mode, proj);