3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block: pass NULL. mode_BB marks it as a basic block. */
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Fixed-arity blocks are mature from the start (no incremental Phi construction). */
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
/* Initialize all block attributes; back edges are tracked per predecessor. */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view predecessors (call graph) are not set up here. */
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T tuple of initial values) in the given block. */
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
/* Constructs an End node. Arity -1 creates a dynamic in-array so that
   keep-alive edges can be appended later (see add_End_keepalive). */
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
125 bool has_unknown = false;
127 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block claims to be mature, its arity must match the Phi's arity. */
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the predecessors for Unknown operands; optimizing a Phi with Unknown
   inputs could fold it away prematurely, so optimization is skipped then. */
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146 As we can't distinguish these easily we keep all of them alive. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
/* Constructs a typed Const node from tarval 'con'.
   Note: the 'block' parameter is deliberately ignored — all constants are
   placed in the start block so they dominate every use. */
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp); /* Call method because of complex assertion. */
/* optimize_node may CSE this Const with an identical existing one; the
   assertion below checks the surviving node still carries the right type. */
161 res = optimize_node (res);
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
/* Untyped Const constructor: delegates to the typed variant with the
   placeholder firm_unknown_type. */
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond node (the "otherwise" branch of a
   switch). Side effect: marks the Cond as fragmentary and records the
   default projection number on it. */
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
303 new_bd_Minus (dbg_info *db, ir_node *block,
304 ir_node *op, ir_mode *mode)
307 ir_graph *irg = current_ir_graph;
309 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
310 res = optimize_node(res);
311 IRN_VRFY_IRG(res, irg);
316 new_bd_Mul (dbg_info *db, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
321 ir_graph *irg = current_ir_graph;
325 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
326 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
332 new_bd_Quot (dbg_info *db, ir_node *block,
333 ir_node *memop, ir_node *op1, ir_node *op2)
337 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_DivMod (dbg_info *db, ir_node *block,
350 ir_node *memop, ir_node *op1, ir_node *op2)
354 ir_graph *irg = current_ir_graph;
359 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
360 res = optimize_node(res);
361 IRN_VRFY_IRG(res, irg);
366 new_bd_Div (dbg_info *db, ir_node *block,
367 ir_node *memop, ir_node *op1, ir_node *op2)
371 ir_graph *irg = current_ir_graph;
376 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
377 res = optimize_node(res);
378 IRN_VRFY_IRG(res, irg);
383 new_bd_Mod (dbg_info *db, ir_node *block,
384 ir_node *memop, ir_node *op1, ir_node *op2)
388 ir_graph *irg = current_ir_graph;
393 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
394 res = optimize_node(res);
395 IRN_VRFY_IRG(res, irg);
400 new_bd_And (dbg_info *db, ir_node *block,
401 ir_node *op1, ir_node *op2, ir_mode *mode)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Or (dbg_info *db, ir_node *block,
417 ir_node *op1, ir_node *op2, ir_mode *mode)
421 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_bd_Eor (dbg_info *db, ir_node *block,
433 ir_node *op1, ir_node *op2, ir_mode *mode)
437 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
442 res = optimize_node (res);
443 IRN_VRFY_IRG(res, irg);
448 new_bd_Not (dbg_info *db, ir_node *block,
449 ir_node *op, ir_mode *mode)
452 ir_graph *irg = current_ir_graph;
454 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
455 res = optimize_node(res);
456 IRN_VRFY_IRG(res, irg);
461 new_bd_Shl (dbg_info *db, ir_node *block,
462 ir_node *op, ir_node *k, ir_mode *mode)
466 ir_graph *irg = current_ir_graph;
470 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_bd_Shr (dbg_info *db, ir_node *block,
478 ir_node *op, ir_node *k, ir_mode *mode)
482 ir_graph *irg = current_ir_graph;
486 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
493 new_bd_Shrs (dbg_info *db, ir_node *block,
494 ir_node *op, ir_node *k, ir_mode *mode)
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
503 res = optimize_node(res);
504 IRN_VRFY_IRG(res, irg);
509 new_bd_Rot (dbg_info *db, ir_node *block,
510 ir_node *op, ir_node *k, ir_mode *mode)
514 ir_graph *irg = current_ir_graph;
518 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
519 res = optimize_node(res);
520 IRN_VRFY_IRG(res, irg);
525 new_bd_Abs (dbg_info *db, ir_node *block,
526 ir_node *op, ir_mode *mode)
529 ir_graph *irg = current_ir_graph;
531 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
532 res = optimize_node (res);
533 IRN_VRFY_IRG(res, irg);
538 new_bd_Cmp (dbg_info *db, ir_node *block,
539 ir_node *op1, ir_node *op2)
543 ir_graph *irg = current_ir_graph;
548 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
555 new_bd_Jmp (dbg_info *db, ir_node *block)
558 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
561 res = optimize_node (res);
562 IRN_VRFY_IRG (res, irg);
567 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
570 ir_graph *irg = current_ir_graph;
572 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
573 res = optimize_node (res);
574 IRN_VRFY_IRG (res, irg);
576 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
582 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
585 ir_graph *irg = current_ir_graph;
587 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
588 res->attr.c.kind = dense;
589 res->attr.c.default_proj = 0;
590 res = optimize_node (res);
591 IRN_VRFY_IRG(res, irg);
596 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
597 ir_node *callee, int arity, ir_node **in, type *tp)
602 ir_graph *irg = current_ir_graph;
605 NEW_ARR_A(ir_node *, r_in, r_arity);
608 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
610 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
612 assert((get_unknown_type() == tp) || is_Method_type(tp));
613 set_Call_type(res, tp);
614 res->attr.call.exc.pin_state = op_pin_state_pinned;
615 res->attr.call.callee_arr = NULL;
616 res = optimize_node(res);
617 IRN_VRFY_IRG(res, irg);
622 new_bd_Return (dbg_info *db, ir_node *block,
623 ir_node *store, int arity, ir_node **in)
628 ir_graph *irg = current_ir_graph;
631 NEW_ARR_A (ir_node *, r_in, r_arity);
633 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
634 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
635 res = optimize_node(res);
636 IRN_VRFY_IRG(res, irg);
641 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
645 ir_graph *irg = current_ir_graph;
649 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
650 res = optimize_node(res);
651 IRN_VRFY_IRG(res, irg);
656 new_bd_Load (dbg_info *db, ir_node *block,
657 ir_node *store, ir_node *adr, ir_mode *mode)
661 ir_graph *irg = current_ir_graph;
665 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
666 res->attr.load.exc.pin_state = op_pin_state_pinned;
667 res->attr.load.load_mode = mode;
668 res->attr.load.volatility = volatility_non_volatile;
669 res = optimize_node(res);
670 IRN_VRFY_IRG(res, irg);
675 new_bd_Store (dbg_info *db, ir_node *block,
676 ir_node *store, ir_node *adr, ir_node *val)
680 ir_graph *irg = current_ir_graph;
685 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
686 res->attr.store.exc.pin_state = op_pin_state_pinned;
687 res->attr.store.volatility = volatility_non_volatile;
688 res = optimize_node(res);
689 IRN_VRFY_IRG(res, irg);
694 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
695 ir_node *size, type *alloc_type, where_alloc where)
699 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
704 res->attr.a.exc.pin_state = op_pin_state_pinned;
705 res->attr.a.where = where;
706 res->attr.a.type = alloc_type;
707 res = optimize_node(res);
708 IRN_VRFY_IRG(res, irg);
713 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
714 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
718 ir_graph *irg = current_ir_graph;
723 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
724 res->attr.f.where = where;
725 res->attr.f.type = free_type;
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel node: selects entity 'ent' from compound 'objptr',
   with 'arity' index operands for array access. Inputs are laid out as
   [store, objptr, index...], hence r_arity = arity + 2. */
732 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
733 int arity, ir_node **in, entity *ent)
738 ir_graph *irg = current_ir_graph;
740 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
743 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
746 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
748 * FIXME: Sel's can select functions which should be of mode mode_P_code.
750 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
751 res->attr.s.ent = ent;
752 res = optimize_node(res);
753 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf node (runtime type test of 'objptr' against 'ent'). */
758 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
759 ir_node *objptr, type *ent)
764 ir_graph *irg = current_ir_graph;
767 NEW_ARR_A(ir_node *, r_in, r_arity);
/* NOTE(review): this creates the node with op_Sel although the function
   constructs an InstOf and fills attr.io — looks like a copy/paste slip;
   presumably op_InstOf was intended. Confirm before changing. */
771 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
772 res->attr.io.ent = ent;
774 /* res = optimize(res); */
775 IRN_VRFY_IRG(res, irg);
/* Constructs a typed SymConst. The result mode depends on the kind:
   address kinds yield a data pointer; other kinds (type tag, size) keep
   whatever mode was selected before this fragment. */
780 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
781 symconst_kind symkind, type *tp) {
784 ir_graph *irg = current_ir_graph;
786 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
787 mode = mode_P_data; /* FIXME: can be mode_P_code */
791 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
793 res->attr.i.num = symkind;
794 res->attr.i.sym = value;
797 res = optimize_node(res);
798 IRN_VRFY_IRG(res, irg);
803 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
804 symconst_kind symkind)
806 ir_graph *irg = current_ir_graph;
808 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
813 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
816 ir_graph *irg = current_ir_graph;
818 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
819 res = optimize_node(res);
820 IRN_VRFY_IRG(res, irg);
825 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
827 ir_node *in[2], *res;
828 ir_graph *irg = current_ir_graph;
832 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
833 res->attr.confirm_cmp = cmp;
834 res = optimize_node (res);
835 IRN_VRFY_IRG(res, irg);
839 /* this function is often called with current_ir_graph unset */
/* Constructs an Unknown node of mode m, placed in the start block.
   No IRN_VRFY here — the node may be created while the graph is in an
   inconsistent construction state. */
841 new_bd_Unknown (ir_mode *m)
844 ir_graph *irg = current_ir_graph;
846 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
847 res = optimize_node(res);
852 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
856 ir_graph *irg = current_ir_graph;
858 in[0] = get_Call_ptr(call);
859 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
860 /* res->attr.callbegin.irg = irg; */
861 res->attr.callbegin.call = call;
862 res = optimize_node(res);
863 IRN_VRFY_IRG(res, irg);
868 new_bd_EndReg (dbg_info *db, ir_node *block)
871 ir_graph *irg = current_ir_graph;
873 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
875 IRN_VRFY_IRG(res, irg);
880 new_bd_EndExcept (dbg_info *db, ir_node *block)
883 ir_graph *irg = current_ir_graph;
885 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
886 irg->end_except = res;
887 IRN_VRFY_IRG (res, irg);
892 new_bd_Break (dbg_info *db, ir_node *block)
895 ir_graph *irg = current_ir_graph;
897 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
898 res = optimize_node(res);
899 IRN_VRFY_IRG(res, irg);
904 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
908 ir_graph *irg = current_ir_graph;
910 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
911 res->attr.filter.proj = proj;
912 res->attr.filter.in_cg = NULL;
913 res->attr.filter.backedge = NULL;
916 assert(get_Proj_pred(res));
917 assert(get_nodes_block(get_Proj_pred(res)));
919 res = optimize_node(res);
920 IRN_VRFY_IRG(res, irg);
925 new_bd_Mux (dbg_info *db, ir_node *block,
926 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
930 ir_graph *irg = current_ir_graph;
936 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
939 res = optimize_node(res);
940 IRN_VRFY_IRG(res, irg);
944 /* --------------------------------------------- */
945 /* private interfaces, for professional use only */
946 /* --------------------------------------------- */
948 /* Constructs a Block with a fixed number of predecessors.
949 Does not set current_block. Can not be used with automatic
950 Phi node construction. */
952 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
954 ir_graph *rem = current_ir_graph;
957 current_ir_graph = irg;
958 res = new_bd_Block (db, arity, in);
959 current_ir_graph = rem;
965 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
967 ir_graph *rem = current_ir_graph;
970 current_ir_graph = irg;
971 res = new_bd_Start (db, block);
972 current_ir_graph = rem;
978 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
981 ir_graph *rem = current_ir_graph;
983 current_ir_graph = rem;
984 res = new_bd_End (db, block);
985 current_ir_graph = rem;
990 /* Creates a Phi node with all predecessors. Calling this constructor
991 is only allowed if the corresponding block is mature. */
993 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
996 ir_graph *rem = current_ir_graph;
998 current_ir_graph = irg;
999 res = new_bd_Phi (db, block,arity, in, mode);
1000 current_ir_graph = rem;
1006 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
1009 ir_graph *rem = current_ir_graph;
1011 current_ir_graph = irg;
1012 res = new_bd_Const_type (db, block, mode, con, tp);
1013 current_ir_graph = rem;
1019 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1022 ir_graph *rem = current_ir_graph;
1024 current_ir_graph = irg;
1025 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1026 current_ir_graph = rem;
1032 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1034 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1038 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1041 ir_graph *rem = current_ir_graph;
1043 current_ir_graph = irg;
1044 res = new_bd_Id(db, block, val, mode);
1045 current_ir_graph = rem;
1051 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1055 ir_graph *rem = current_ir_graph;
1057 current_ir_graph = irg;
1058 res = new_bd_Proj(db, block, arg, mode, proj);
1059 current_ir_graph = rem;
1065 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1069 ir_graph *rem = current_ir_graph;
1071 current_ir_graph = irg;
1072 res = new_bd_defaultProj(db, block, arg, max_proj);
1073 current_ir_graph = rem;
1079 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1082 ir_graph *rem = current_ir_graph;
1084 current_ir_graph = irg;
1085 res = new_bd_Conv(db, block, op, mode);
1086 current_ir_graph = rem;
1092 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
1095 ir_graph *rem = current_ir_graph;
1097 current_ir_graph = irg;
1098 res = new_bd_Cast(db, block, op, to_tp);
1099 current_ir_graph = rem;
1105 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1108 ir_graph *rem = current_ir_graph;
1110 current_ir_graph = irg;
1111 res = new_bd_Tuple(db, block, arity, in);
1112 current_ir_graph = rem;
1118 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1119 ir_node *op1, ir_node *op2, ir_mode *mode)
1122 ir_graph *rem = current_ir_graph;
1124 current_ir_graph = irg;
1125 res = new_bd_Add(db, block, op1, op2, mode);
1126 current_ir_graph = rem;
1132 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1133 ir_node *op1, ir_node *op2, ir_mode *mode)
1136 ir_graph *rem = current_ir_graph;
1138 current_ir_graph = irg;
1139 res = new_bd_Sub(db, block, op1, op2, mode);
1140 current_ir_graph = rem;
1146 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1147 ir_node *op, ir_mode *mode)
1150 ir_graph *rem = current_ir_graph;
1152 current_ir_graph = irg;
1153 res = new_bd_Minus(db, block, op, mode);
1154 current_ir_graph = rem;
1160 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1161 ir_node *op1, ir_node *op2, ir_mode *mode)
1164 ir_graph *rem = current_ir_graph;
1166 current_ir_graph = irg;
1167 res = new_bd_Mul(db, block, op1, op2, mode);
1168 current_ir_graph = rem;
1174 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1175 ir_node *memop, ir_node *op1, ir_node *op2)
1178 ir_graph *rem = current_ir_graph;
1180 current_ir_graph = irg;
1181 res = new_bd_Quot(db, block, memop, op1, op2);
1182 current_ir_graph = rem;
1188 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1189 ir_node *memop, ir_node *op1, ir_node *op2)
1192 ir_graph *rem = current_ir_graph;
1194 current_ir_graph = irg;
1195 res = new_bd_DivMod(db, block, memop, op1, op2);
1196 current_ir_graph = rem;
1202 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1203 ir_node *memop, ir_node *op1, ir_node *op2)
1206 ir_graph *rem = current_ir_graph;
1208 current_ir_graph = irg;
1209 res = new_bd_Div (db, block, memop, op1, op2);
1210 current_ir_graph =rem;
1216 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1217 ir_node *memop, ir_node *op1, ir_node *op2)
1220 ir_graph *rem = current_ir_graph;
1222 current_ir_graph = irg;
1223 res = new_bd_Mod(db, block, memop, op1, op2);
1224 current_ir_graph = rem;
1230 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1231 ir_node *op1, ir_node *op2, ir_mode *mode)
1234 ir_graph *rem = current_ir_graph;
1236 current_ir_graph = irg;
1237 res = new_bd_And(db, block, op1, op2, mode);
1238 current_ir_graph = rem;
1244 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1245 ir_node *op1, ir_node *op2, ir_mode *mode)
1248 ir_graph *rem = current_ir_graph;
1250 current_ir_graph = irg;
1251 res = new_bd_Or(db, block, op1, op2, mode);
1252 current_ir_graph = rem;
1258 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1259 ir_node *op1, ir_node *op2, ir_mode *mode)
1262 ir_graph *rem = current_ir_graph;
1264 current_ir_graph = irg;
1265 res = new_bd_Eor(db, block, op1, op2, mode);
1266 current_ir_graph = rem;
1272 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1273 ir_node *op, ir_mode *mode)
1276 ir_graph *rem = current_ir_graph;
1278 current_ir_graph = irg;
1279 res = new_bd_Not(db, block, op, mode);
1280 current_ir_graph = rem;
1286 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1287 ir_node *op, ir_node *k, ir_mode *mode)
1290 ir_graph *rem = current_ir_graph;
1292 current_ir_graph = irg;
1293 res = new_bd_Shl (db, block, op, k, mode);
1294 current_ir_graph = rem;
1300 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1301 ir_node *op, ir_node *k, ir_mode *mode)
1304 ir_graph *rem = current_ir_graph;
1306 current_ir_graph = irg;
1307 res = new_bd_Shr(db, block, op, k, mode);
1308 current_ir_graph = rem;
1314 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1315 ir_node *op, ir_node *k, ir_mode *mode)
1318 ir_graph *rem = current_ir_graph;
1320 current_ir_graph = irg;
1321 res = new_bd_Shrs(db, block, op, k, mode);
1322 current_ir_graph = rem;
1328 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1329 ir_node *op, ir_node *k, ir_mode *mode)
1332 ir_graph *rem = current_ir_graph;
1334 current_ir_graph = irg;
1335 res = new_bd_Rot(db, block, op, k, mode);
1336 current_ir_graph = rem;
1342 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1343 ir_node *op, ir_mode *mode)
1346 ir_graph *rem = current_ir_graph;
1348 current_ir_graph = irg;
1349 res = new_bd_Abs(db, block, op, mode);
1350 current_ir_graph = rem;
1356 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1357 ir_node *op1, ir_node *op2)
1360 ir_graph *rem = current_ir_graph;
1362 current_ir_graph = irg;
1363 res = new_bd_Cmp(db, block, op1, op2);
1364 current_ir_graph = rem;
1370 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1373 ir_graph *rem = current_ir_graph;
1375 current_ir_graph = irg;
1376 res = new_bd_Jmp(db, block);
1377 current_ir_graph = rem;
1383 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1386 ir_graph *rem = current_ir_graph;
1388 current_ir_graph = irg;
1389 res = new_bd_IJmp(db, block, tgt);
1390 current_ir_graph = rem;
1396 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1399 ir_graph *rem = current_ir_graph;
1401 current_ir_graph = irg;
1402 res = new_bd_Cond(db, block, c);
1403 current_ir_graph = rem;
1409 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1410 ir_node *callee, int arity, ir_node **in, type *tp)
1413 ir_graph *rem = current_ir_graph;
1415 current_ir_graph = irg;
1416 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1417 current_ir_graph = rem;
1423 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1424 ir_node *store, int arity, ir_node **in)
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_Return(db, block, store, arity, in);
1431 current_ir_graph = rem;
1437 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1440 ir_graph *rem = current_ir_graph;
1442 current_ir_graph = irg;
1443 res = new_bd_Raise(db, block, store, obj);
1444 current_ir_graph = rem;
1450 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1451 ir_node *store, ir_node *adr, ir_mode *mode)
1454 ir_graph *rem = current_ir_graph;
1456 current_ir_graph = irg;
1457 res = new_bd_Load(db, block, store, adr, mode);
1458 current_ir_graph = rem;
1464 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1465 ir_node *store, ir_node *adr, ir_node *val)
1468 ir_graph *rem = current_ir_graph;
1470 current_ir_graph = irg;
1471 res = new_bd_Store(db, block, store, adr, val);
1472 current_ir_graph = rem;
1478 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1479 ir_node *size, type *alloc_type, where_alloc where)
1482 ir_graph *rem = current_ir_graph;
1484 current_ir_graph = irg;
1485 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1486 current_ir_graph = rem;
1492 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1493 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
1496 ir_graph *rem = current_ir_graph;
1498 current_ir_graph = irg;
1499 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1500 current_ir_graph = rem;
1506 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1507 int arity, ir_node **in, entity *ent)
1510 ir_graph *rem = current_ir_graph;
1512 current_ir_graph = irg;
1513 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1514 current_ir_graph = rem;
1520 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1521 ir_node *objptr, type *ent)
1524 ir_graph *rem = current_ir_graph;
1526 current_ir_graph = irg;
1527 res = new_bd_InstOf(db, block, store, objptr, ent);
1528 current_ir_graph = rem;
1534 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1535 symconst_kind symkind, type *tp)
1538 ir_graph *rem = current_ir_graph;
1540 current_ir_graph = irg;
1541 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1542 current_ir_graph = rem;
1548 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1549 symconst_kind symkind)
1551 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1555 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp)
1557 symconst_symbol sym = {(type *)symbol};
1558 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1561 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
1562 symconst_symbol sym = {(type *)symbol};
1563 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1566 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1567 symconst_symbol sym = {symbol};
1568 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1571 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1572 symconst_symbol sym = {symbol};
1573 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/*
 * NOTE(review): debug-info-aware remote constructors ("new_rd_*").  The common
 * pattern visible below: remember current_ir_graph in `rem`, temporarily make
 * `irg` the current graph, delegate to the matching block-based "new_bd_*"
 * constructor, then restore the previous graph.  This dump is missing lines
 * (local declarations, `return res;`, braces) -- verify against the complete
 * source before changing anything here.
 */
1577 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1580 ir_graph *rem = current_ir_graph;
1582 current_ir_graph = irg;
1583 res = new_bd_Sync(db, block, arity, in);
1584 current_ir_graph = rem;
1590 new_rd_Bad (ir_graph *irg)
1596 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1599 ir_graph *rem = current_ir_graph;
1601 current_ir_graph = irg;
1602 res = new_bd_Confirm(db, block, val, bound, cmp);
1603 current_ir_graph = rem;
1608 /* this function is often called with current_ir_graph unset */
1610 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1613 ir_graph *rem = current_ir_graph;
1615 current_ir_graph = irg;
1616 res = new_bd_Unknown(m);
1617 current_ir_graph = rem;
1623 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1626 ir_graph *rem = current_ir_graph;
1628 current_ir_graph = irg;
1629 res = new_bd_CallBegin(db, block, call);
1630 current_ir_graph = rem;
/* EndReg/EndExcept are built directly with new_ir_node (arity -1 = dynamic
   in-array) instead of going through a new_bd_* helper. */
1636 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1640 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1642 IRN_VRFY_IRG(res, irg);
1647 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1651 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
/* side effect: the graph's end_except anchor is updated to the new node */
1652 irg->end_except = res;
1653 IRN_VRFY_IRG (res, irg);
1658 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1661 ir_graph *rem = current_ir_graph;
1663 current_ir_graph = irg;
1664 res = new_bd_Break(db, block);
1665 current_ir_graph = rem;
1671 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1675 ir_graph *rem = current_ir_graph;
1677 current_ir_graph = irg;
1678 res = new_bd_Filter(db, block, arg, mode, proj);
1679 current_ir_graph = rem;
1685 new_rd_NoMem (ir_graph *irg) {
1690 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1691 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1694 ir_graph *rem = current_ir_graph;
1696 current_ir_graph = irg;
1697 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1698 current_ir_graph = rem;
/*
 * NOTE(review): "new_r_*" convenience wrappers.  Every function in this run
 * simply forwards to the corresponding "new_rd_*" constructor with a NULL
 * dbg_info (no source position attached).  The closing braces of these
 * one-line bodies are missing from this dump -- do not restyle without the
 * full source.
 */
1704 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1705 return new_rd_Block(NULL, irg, arity, in);
1707 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1708 return new_rd_Start(NULL, irg, block);
1710 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1711 return new_rd_End(NULL, irg, block);
1713 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1714 return new_rd_Jmp(NULL, irg, block);
1716 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1717 return new_rd_IJmp(NULL, irg, block, tgt);
1719 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1720 return new_rd_Cond(NULL, irg, block, c);
1722 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1723 ir_node *store, int arity, ir_node **in) {
1724 return new_rd_Return(NULL, irg, block, store, arity, in);
1726 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1727 ir_node *store, ir_node *obj) {
1728 return new_rd_Raise(NULL, irg, block, store, obj);
1730 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1731 ir_mode *mode, tarval *con) {
1732 return new_rd_Const(NULL, irg, block, mode, con);
1735 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1736 ir_mode *mode, long value) {
1737 return new_rd_Const_long(NULL, irg, block, mode, value);
1740 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1741 ir_mode *mode, tarval *con, type *tp) {
1742 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1745 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1746 symconst_symbol value, symconst_kind symkind) {
1747 return new_rd_SymConst(NULL, irg, block, value, symkind);
1749 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1750 ir_node *objptr, int n_index, ir_node **index,
1752 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1754 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1756 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
1758 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1759 ir_node *callee, int arity, ir_node **in,
1761 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* arithmetic and logic node wrappers */
1763 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1764 ir_node *op1, ir_node *op2, ir_mode *mode) {
1765 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1767 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1768 ir_node *op1, ir_node *op2, ir_mode *mode) {
1769 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1771 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1772 ir_node *op, ir_mode *mode) {
1773 return new_rd_Minus(NULL, irg, block, op, mode);
1775 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1776 ir_node *op1, ir_node *op2, ir_mode *mode) {
1777 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1779 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1780 ir_node *memop, ir_node *op1, ir_node *op2) {
1781 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1783 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1784 ir_node *memop, ir_node *op1, ir_node *op2) {
1785 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1787 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1788 ir_node *memop, ir_node *op1, ir_node *op2) {
1789 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1791 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1792 ir_node *memop, ir_node *op1, ir_node *op2) {
1793 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1795 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1796 ir_node *op, ir_mode *mode) {
1797 return new_rd_Abs(NULL, irg, block, op, mode);
1799 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1800 ir_node *op1, ir_node *op2, ir_mode *mode) {
1801 return new_rd_And(NULL, irg, block, op1, op2, mode);
1803 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1804 ir_node *op1, ir_node *op2, ir_mode *mode) {
1805 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1807 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1808 ir_node *op1, ir_node *op2, ir_mode *mode) {
1809 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1811 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1812 ir_node *op, ir_mode *mode) {
1813 return new_rd_Not(NULL, irg, block, op, mode);
1815 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1816 ir_node *op1, ir_node *op2) {
1817 return new_rd_Cmp(NULL, irg, block, op1, op2);
1819 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1820 ir_node *op, ir_node *k, ir_mode *mode) {
1821 return new_rd_Shl(NULL, irg, block, op, k, mode);
1823 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1824 ir_node *op, ir_node *k, ir_mode *mode) {
1825 return new_rd_Shr(NULL, irg, block, op, k, mode);
1827 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1828 ir_node *op, ir_node *k, ir_mode *mode) {
1829 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1831 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1832 ir_node *op, ir_node *k, ir_mode *mode) {
1833 return new_rd_Rot(NULL, irg, block, op, k, mode);
1835 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1836 ir_node *op, ir_mode *mode) {
1837 return new_rd_Conv(NULL, irg, block, op, mode);
1839 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1840 return new_rd_Cast(NULL, irg, block, op, to_tp);
/* memory and control wrappers */
1842 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1843 ir_node **in, ir_mode *mode) {
1844 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1846 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1847 ir_node *store, ir_node *adr, ir_mode *mode) {
1848 return new_rd_Load(NULL, irg, block, store, adr, mode);
1850 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1851 ir_node *store, ir_node *adr, ir_node *val) {
1852 return new_rd_Store(NULL, irg, block, store, adr, val);
1854 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1855 ir_node *size, type *alloc_type, where_alloc where) {
1856 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1858 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1859 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1860 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1862 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1863 return new_rd_Sync(NULL, irg, block, arity, in);
1865 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1866 ir_mode *mode, long proj) {
1867 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1869 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1871 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1873 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1874 int arity, ir_node **in) {
1875 return new_rd_Tuple(NULL, irg, block, arity, in );
1877 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1878 ir_node *val, ir_mode *mode) {
1879 return new_rd_Id(NULL, irg, block, val, mode);
1881 ir_node *new_r_Bad (ir_graph *irg) {
1882 return new_rd_Bad(irg);
1884 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1885 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1887 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1888 return new_rd_Unknown(irg, m);
1890 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1891 return new_rd_CallBegin(NULL, irg, block, callee);
1893 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1894 return new_rd_EndReg(NULL, irg, block);
1896 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1897 return new_rd_EndExcept(NULL, irg, block);
1899 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1900 return new_rd_Break(NULL, irg, block);
1902 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1903 ir_mode *mode, long proj) {
1904 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1906 ir_node *new_r_NoMem (ir_graph *irg) {
1907 return new_rd_NoMem(irg);
1909 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1910 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1911 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
/*
 * NOTE(review): "new_d_*" constructors build nodes in the graph's current
 * block (current_ir_graph->current_block) and run the optimizer on the fresh
 * node.  Lines are missing from this dump (declarations, returns, braces).
 */
1915 /** ********************/
1916 /** public interfaces */
1917 /** construction tools */
1921 * - create a new Start node in the current block
1923 * @return s - pointer to the created Start node
1928 new_d_Start (dbg_info *db)
1932 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1933 op_Start, mode_T, 0, NULL);
1934 /* res->attr.start.irg = current_ir_graph; */
1936 res = optimize_node(res);
1937 IRN_VRFY_IRG(res, current_ir_graph);
1942 new_d_End (dbg_info *db)
1945 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1946 op_End, mode_X, -1, NULL);
1947 res = optimize_node(res);
1948 IRN_VRFY_IRG(res, current_ir_graph);
1953 /* Constructs a Block with a fixed number of predecessors.
1954 Does set current_block. Can be used with automatic Phi
1955 node construction. */
1957 new_d_Block (dbg_info *db, int arity, ir_node **in)
1961 bool has_unknown = false;
1963 res = new_bd_Block(db, arity, in);
/* While still constructing the graph, allocate the per-block array used by
   the automatic Phi construction, one slot per local variable (n_loc). */
1965 /* Create and initialize array for Phi-node construction. */
1966 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1967 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1968 current_ir_graph->n_loc);
1969 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1972 for (i = arity-1; i >= 0; i--)
1973 if (get_irn_op(in[i]) == op_Unknown) {
/* blocks with Unknown predecessors are not optimized (has_unknown guard) */
1978 if (!has_unknown) res = optimize_node(res);
/* side effect: the new block becomes the current block for construction */
1979 current_ir_graph->current_block = res;
1981 IRN_VRFY_IRG(res, current_ir_graph);
1986 /* ***********************************************************************/
1987 /* Methods necessary for automatic Phi node creation */
1989 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1990 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1991 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1992 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1994 Call Graph: ( A ---> B == A "calls" B)
1996 get_value mature_immBlock
2004 get_r_value_internal |
2008 new_rd_Phi0 new_rd_Phi_in
2010 * *************************************************************************** */
/* Phi0 = placeholder Phi with zero predecessors; filled in when the block
   matures. */
2012 /** Creates a Phi node with 0 predecessors */
2013 static INLINE ir_node *
2014 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2018 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2019 IRN_VRFY_IRG(res, irg);
2023 /* There are two implementations of the Phi node construction. The first
2024 is faster, but does not work for blocks with more than 2 predecessors.
2025 The second works always but is slower and causes more unnecessary Phi
2027 Select the implementations by the following preprocessor flag set in
2029 #if USE_FAST_PHI_CONSTRUCTION
2031 /* This is a stack used for allocating and deallocating nodes in
2032 new_rd_Phi_in. The original implementation used the obstack
2033 to model this stack, now it is explicit. This reduces side effects.
2035 #if USE_EXPLICIT_PHI_IN_STACK
2037 new_Phi_in_stack(void) {
2040 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2042 res->stack = NEW_ARR_F (ir_node *, 0);
2049 free_Phi_in_stack(Phi_in_stack *s) {
2050 DEL_ARR_F(s->stack);
/* Push a discarded Phi so it can be recycled by the allocator below. */
2054 free_to_Phi_in_stack(ir_node *phi) {
2055 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2056 current_ir_graph->Phi_in_stack->pos)
2057 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2059 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2061 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a recycled Phi if one is available, otherwise allocate a new node.
   NOTE(review): the branch structure is elided in this dump; do not modify
   without the full source. */
2064 static INLINE ir_node *
2065 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2066 int arity, ir_node **in) {
2068 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2069 int pos = current_ir_graph->Phi_in_stack->pos;
2073 /* We need to allocate a new node */
2074 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2075 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2077 /* reuse the old node and initialize it again. */
2080 assert (res->kind == k_ir_node);
2081 assert (res->op == op_Phi);
2085 assert (arity >= 0);
2086 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2087 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2089 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2091 (current_ir_graph->Phi_in_stack->pos)--;
2095 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2097 /* Creates a Phi node with a given, fixed array **in of predecessors.
2098 If the Phi node is unnecessary, as the same value reaches the block
2099 through all control flow paths, it is eliminated and the value
2100 returned directly. This constructor is only intended for use in
2101 the automatic Phi node generation triggered by get_value or mature.
2102 The implementation is quite tricky and depends on the fact, that
2103 the nodes are allocated on a stack:
2104 The in array contains predecessors and NULLs. The NULLs appear,
2105 if get_r_value_internal, that computed the predecessors, reached
2106 the same block on two paths. In this case the same value reaches
2107 this block on both paths, there is no definition in between. We need
2108 not allocate a Phi where these path's merge, but we have to communicate
2109 this fact to the caller. This happens by returning a pointer to the
2110 node the caller _will_ allocate. (Yes, we predict the address. We can
2111 do so because the nodes are allocated on the obstack.) The caller then
2112 finds a pointer to itself and, when this routine is called again,
2115 static INLINE ir_node *
2116 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2119 ir_node *res, *known;
2121 /* Allocate a new node on the obstack. This can return a node to
2122 which some of the pointers in the in-array already point.
2123 Attention: the constructor copies the in array, i.e., the later
2124 changes to the array in this routine do not affect the
2125 constructed node! If the in array contains NULLs, there will be
2126 missing predecessors in the returned node. Is this a possible
2127 internal state of the Phi node generation? */
2128 #if USE_EXPLICIT_PHI_IN_STACK
2129 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2131 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2132 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2135 /* The in-array can contain NULLs. These were returned by
2136 get_r_value_internal if it reached the same block/definition on a
2137 second path. The NULLs are replaced by the node itself to
2138 simplify the test in the next loop. */
2139 for (i = 0; i < ins; ++i) {
2144 /* This loop checks whether the Phi has more than one predecessor.
2145 If so, it is a real Phi node and we break the loop. Else the Phi
2146 node merges the same definition on several paths and therefore is
2148 for (i = 0; i < ins; ++i) {
2149 if (in[i] == res || in[i] == known)
2158 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2160 #if USE_EXPLICIT_PHI_IN_STACK
2161 free_to_Phi_in_stack(res);
/* Without the explicit stack: give the obstack memory back and unregister
   the node from the edge datastructure before discarding it. */
2163 edges_node_deleted(res, current_ir_graph);
2164 obstack_free(current_ir_graph->obst, res);
2168 res = optimize_node (res);
2169 IRN_VRFY_IRG(res, irg);
2172 /* return the pointer to the Phi node. This node might be deallocated! */
2177 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2180 allocates and returns this node. The routine called to allocate the
2181 node might optimize it away and return a real value, or even a pointer
2182 to a deallocated Phi node on top of the obstack!
2183 This function is called with an in-array of proper size. **/
2185 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2187 ir_node *prevBlock, *res;
2190 /* This loop goes to all predecessor blocks of the block the Phi node is in
2191 and there finds the operands of the Phi node by calling
2192 get_r_value_internal. */
/* block->in[] is 1-based for predecessors; in[0] of a control-flow node is
   its block, hence the ->in[0] hop below. */
2193 for (i = 1; i <= ins; ++i) {
2194 assert (block->in[i]);
2195 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2197 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2200 /* After collecting all predecessors into the array nin a new Phi node
2201 with these predecessors is created. This constructor contains an
2202 optimization: If all predecessors of the Phi node are identical it
2203 returns the only operand instead of a new Phi node. If the value
2204 passes two different control flow edges without being defined, and
2205 this is the second path treated, a pointer to the node that will be
2206 allocated for the first path (recursion) is returned. We already
2207 know the address of this node, as it is the next node to be allocated
2208 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2209 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2211 /* Now we now the value for "pos" and can enter it in the array with
2212 all known local variables. Attention: this might be a pointer to
2213 a node, that later will be allocated!!! See new_rd_Phi_in.
2214 If this is called in mature, after some set_value in the same block,
2215 the proper value must not be overwritten:
2217 get_value (makes Phi0, put's it into graph_arr)
2218 set_value (overwrites Phi0 in graph_arr)
2219 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2222 if (!block->attr.block.graph_arr[pos]) {
2223 block->attr.block.graph_arr[pos] = res;
2225 /* printf(" value already computed by %s\n",
2226 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2232 /* This function returns the last definition of a variable. In case
2233 this variable was last defined in a previous block, Phi nodes are
2234 inserted. If the part of the firm graph containing the definition
2235 is not yet constructed, a dummy Phi node is returned. */
2237 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2240 /* There are 4 cases to treat.
2242 1. The block is not mature and we visit it the first time. We can not
2243 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2244 predecessors is returned. This node is added to the linked list (field
2245 "link") of the containing block to be completed when this block is
2246 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2249 2. The value is already known in this block, graph_arr[pos] is set and we
2250 visit the block the first time. We can return the value without
2251 creating any new nodes.
2253 3. The block is mature and we visit it the first time. A Phi node needs
2254 to be created (phi_merge). If the Phi is not needed, as all it's
2255 operands are the same value reaching the block through different
2256 paths, it's optimized away and the value itself is returned.
2258 4. The block is mature, and we visit it the second time. Now two
2259 subcases are possible:
2260 * The value was computed completely the last time we were here. This
2261 is the case if there is no loop. We can return the proper value.
2262 * The recursion that visited this node and set the flag did not
2263 return yet. We are computing a value in a loop and need to
2264 break the recursion without knowing the result yet.
2265 @@@ strange case. Straight forward we would create a Phi before
2266 starting the computation of it's predecessors. In this case we will
2267 find a Phi here in any case. The problem is that this implementation
2268 only creates a Phi after computing the predecessors, so that it is
2269 hard to compute self references of this Phi. @@@
2270 There is no simple check for the second subcase. Therefore we check
2271 for a second visit and treat all such cases as the second subcase.
2272 Anyways, the basic situation is the same: we reached a block
2273 on two paths without finding a definition of the value: No Phi
2274 nodes are needed on both paths.
2275 We return this information "Two paths, no Phi needed" by a very tricky
2276 implementation that relies on the fact that an obstack is a stack and
2277 will return a node with the same address on different allocations.
2278 Look also at phi_merge and new_rd_phi_in to understand this.
2279 @@@ Unfortunately this does not work, see testprogram
2280 three_cfpred_example.
2284 /* case 4 -- already visited. */
2285 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2287 /* visited the first time */
2288 set_irn_visited(block, get_irg_visited(current_ir_graph));
2290 /* Get the local valid value */
2291 res = block->attr.block.graph_arr[pos];
2293 /* case 2 -- If the value is actually computed, return it. */
2294 if (res) return res;
2296 if (block->attr.block.matured) { /* case 3 */
2298 /* The Phi has the same amount of ins as the corresponding block. */
2299 int ins = get_irn_arity(block);
/* nin is allocated on the C stack (alloca-style) -- only valid within this
   call. */
2301 NEW_ARR_A (ir_node *, nin, ins);
2303 /* Phi merge collects the predecessors and then creates a node. */
2304 res = phi_merge (block, pos, mode, nin, ins);
2306 } else { /* case 1 */
2307 /* The block is not mature, we don't know how many in's are needed. A Phi
2308 with zero predecessors is created. Such a Phi node is called Phi0
2309 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2310 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2312 The Phi0 has to remember the pos of it's internal value. If the real
2313 Phi is computed, pos is used to update the array with the local
2316 res = new_rd_Phi0 (current_ir_graph, block, mode);
2317 res->attr.phi0_pos = pos;
2318 res->link = block->link;
2322 /* If we get here, the frontend missed a use-before-definition error */
2325 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2326 assert (mode->code >= irm_F && mode->code <= irm_P);
2327 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2328 tarval_mode_null[mode->code]);
2331 /* The local valid value is available now. */
2332 block->attr.block.graph_arr[pos] = res;
2340 it starts the recursion. This causes an Id at the entry of
2341 every block that has no definition of the value! **/
/* Slow (general) Phi construction variant: the explicit Phi_in stack is not
   used here, so provide no-op stubs to keep the interface. */
2343 #if USE_EXPLICIT_PHI_IN_STACK
2345 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2346 void free_Phi_in_stack(Phi_in_stack *s) { }
2349 static INLINE ir_node *
2350 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2351 ir_node **in, int ins, ir_node *phi0)
2354 ir_node *res, *known;
2356 /* Allocate a new node on the obstack. The allocation copies the in
2358 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2359 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2361 /* This loop checks whether the Phi has more than one predecessor.
2362 If so, it is a real Phi node and we break the loop. Else the
2363 Phi node merges the same definition on several paths and therefore
2364 is not needed. Don't consider Bad nodes! */
2366 for (i=0; i < ins; ++i)
2370 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2372 /* Optimize self referencing Phis: We can't detect them yet properly, as
2373 they still refer to the Phi0 they will replace. So replace right now. */
2374 if (phi0 && in[i] == phi0) in[i] = res;
2376 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2384 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2387 edges_node_deleted(res, current_ir_graph);
2388 obstack_free (current_ir_graph->obst, res);
2389 if (is_Phi(known)) {
2390 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2391 order, an enclosing Phi know may get superfluous. */
2392 res = optimize_in_place_2(known);
2394 exchange(known, res);
2400 /* A undefined value, e.g., in unreachable code. */
2404 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2405 IRN_VRFY_IRG(res, irg);
2406 /* Memory Phis in endless loops must be kept alive.
2407 As we can't distinguish these easily we keep all of them alive. */
2408 if ((res->op == op_Phi) && (mode == mode_M))
2409 add_End_keepalive(irg->end, res);
2416 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
/* PRECISE_EXC_CONTEXT: fragile operations (Call, Load, Store, ...) may raise
   exceptions; the frag_arr records the memory/value state at the point of the
   fragile op so exception successors see a precise context. */
2418 #if PRECISE_EXC_CONTEXT
2420 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2422 /* Construct a new frag_array for node n.
2423 Copy the content from the current graph_arr of the corresponding block:
2424 this is the current state.
2425 Set ProjM(n) as current memory state.
2426 Further the last entry in frag_arr of current block points to n. This
2427 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2429 static INLINE ir_node ** new_frag_arr (ir_node *n)
2434 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2435 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2436 sizeof(ir_node *)*current_ir_graph->n_loc);
2438 /* turn off optimization before allocating Proj nodes, as res isn't
2440 opt = get_opt_optimize(); set_optimize(0);
2441 /* Here we rely on the fact that all frag ops have Memory as first result! */
2442 if (get_irn_op(n) == op_Call)
2443 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* All other fragile ops share the same memory Proj number; the asserts
   below document (and check) that assumption. */
2445 assert((pn_Quot_M == pn_DivMod_M) &&
2446 (pn_Quot_M == pn_Div_M) &&
2447 (pn_Quot_M == pn_Mod_M) &&
2448 (pn_Quot_M == pn_Load_M) &&
2449 (pn_Quot_M == pn_Store_M) &&
2450 (pn_Quot_M == pn_Alloc_M) );
2451 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* link this fragile op into the block's chain via the last graph_arr slot */
2455 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2460 * returns the frag_arr from a node
2462 static INLINE ir_node **
2463 get_frag_arr (ir_node *n) {
2464 switch (get_irn_opcode(n)) {
2466 return n->attr.call.exc.frag_arr;
2468 return n->attr.a.exc.frag_arr;
2470 return n->attr.load.exc.frag_arr;
2472 return n->attr.store.exc.frag_arr;
2474 return n->attr.except.frag_arr;
/* Propagate a value into a frag_arr slot without overwriting existing
   entries, then follow the chain of later fragile ops.  NOTE(review): both a
   recursive and an iterative (bounded to 1000 steps) form appear below --
   parts of this function are elided in this dump. */
2479 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2481 if (!frag_arr[pos]) frag_arr[pos] = val;
2482 if (frag_arr[current_ir_graph->n_loc - 1]) {
2483 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2484 assert(arr != frag_arr && "Endless recursion detected");
2485 set_frag_value(arr, pos, val);
2490 for (i = 0; i < 1000; ++i) {
2491 if (!frag_arr[pos]) {
2492 frag_arr[pos] = val;
2494 if (frag_arr[current_ir_graph->n_loc - 1]) {
2495 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2501 assert(0 && "potential endless recursion");
/* Look up the value at `pos` as seen right after the fragile op cfOp; builds
   a Phi if a later set_value shadowed it.  Lines are elided in this dump. */
2506 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2510 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2512 frag_arr = get_frag_arr(cfOp);
2513 res = frag_arr[pos];
2515 if (block->attr.block.graph_arr[pos]) {
2516 /* There was a set_value after the cfOp and no get_value before that
2517 set_value. We must build a Phi node now. */
2518 if (block->attr.block.matured) {
2519 int ins = get_irn_arity(block);
2521 NEW_ARR_A (ir_node *, nin, ins);
2522 res = phi_merge(block, pos, mode, nin, ins);
/* immature block: fall back to a Phi0 placeholder, remembered on the
   block's link list for mature_immBlock */
2524 res = new_rd_Phi0 (current_ir_graph, block, mode);
2525 res->attr.phi0_pos = pos;
2526 res->link = block->link;
2530 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2531 but this should be better: (remove comment if this works) */
2532 /* It's a Phi, we can write this into all graph_arrs with NULL */
2533 set_frag_value(block->attr.block.graph_arr, pos, res);
2535 res = get_r_value_internal(block, pos, mode);
2536 set_frag_value(block->attr.block.graph_arr, pos, res);
2544 computes the predecessors for the real phi node, and then
2545 allocates and returns this node. The routine called to allocate the
2546 node might optimize it away and return a real value.
2547 This function must be called with an in-array of proper size. **/
2549 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2551 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2554 /* If this block has no value at pos create a Phi0 and remember it
2555 in graph_arr to break recursions.
2556 Else we may not set graph_arr as there a later value is remembered. */
2558 if (!block->attr.block.graph_arr[pos]) {
2559 if (block == get_irg_start_block(current_ir_graph)) {
2560 /* Collapsing to Bad tarvals is no good idea.
2561 So we call a user-supplied routine here that deals with this case as
2562 appropriate for the given language. Sorrily the only help we can give
2563 here is the position.
2565 Even if all variables are defined before use, it can happen that
2566 we get to the start block, if a Cond has been replaced by a tuple
2567 (bad, jmp). In this case we call the function needlessly, eventually
2568 generating an non existent error.
2569 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2572 if (default_initialize_local_variable)
2573 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2575 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2576 /* We don't need to care about exception ops in the start block.
2577 There are none by definition. */
2578 return block->attr.block.graph_arr[pos];
/* Not the start block: insert a Phi0 placeholder to break recursion. */
2580 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2581 block->attr.block.graph_arr[pos] = phi0;
2582 #if PRECISE_EXC_CONTEXT
2583 if (get_opt_precise_exc_context()) {
2584 /* Set graph_arr for fragile ops. Also here we should break recursion.
2585 We could choose a cyclic path through an cfop. But the recursion would
2586 break at some point. */
2587 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2593 /* This loop goes to all predecessor blocks of the block the Phi node
2594 is in and there finds the operands of the Phi node by calling
2595 get_r_value_internal. */
2596 for (i = 1; i <= ins; ++i) {
2597 prevCfOp = skip_Proj(block->in[i]);
2599 if (is_Bad(prevCfOp)) {
2600 /* In case a Cond has been optimized we would get right to the start block
2601 with an invalid definition. */
2602 nin[i-1] = new_Bad();
2605 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2607 if (!is_Bad(prevBlock)) {
2608 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: fetch the value as of that op, not block end. */
2609 if (get_opt_precise_exc_context() &&
2610 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2611 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2612 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2615 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2617 nin[i-1] = new_Bad();
2621 /* We want to pass the Phi0 node to the constructor: this finds additional
2622 optimization possibilities.
2623 The Phi0 node either is allocated in this function, or it comes from
2624 a former call to get_r_value_internal. In this case we may not yet
2625 exchange phi0, as this is done in mature_immBlock. */
2627 phi0_all = block->attr.block.graph_arr[pos];
2628 if (!((get_irn_op(phi0_all) == op_Phi) &&
2629 (get_irn_arity(phi0_all) == 0) &&
2630 (get_nodes_block(phi0_all) == block)))
2636 /* After collecting all predecessors into the array nin a new Phi node
2637 with these predecessors is created. This constructor contains an
2638 optimization: If all predecessors of the Phi node are identical it
2639 returns the only operand instead of a new Phi node. */
2640 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2642 /* In case we allocated a Phi0 node at the beginning of this procedure,
2643 we need to exchange this Phi0 with the real Phi. */
2645 exchange(phi0, res);
2646 block->attr.block.graph_arr[pos] = res;
2647 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2648 only an optimization. */
2654 /* This function returns the last definition of a variable. In case
2655 this variable was last defined in a previous block, Phi nodes are
2656 inserted. If the part of the firm graph containing the definition
2657 is not yet constructed, a dummy Phi node is returned. */
/* Returns the last definition of value number <pos> (with mode <mode>)
   visible in <block>.  If the value was last defined in a predecessor
   block, Phi nodes are inserted on demand; if that part of the graph is
   not yet constructed, a placeholder Phi0 is returned instead.
   NOTE(review): the function's return type line and its closing
   "return res;" / brace lie outside this excerpt. */
2659 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2662   /* There are 4 cases to treat.
2664      1. The block is not mature and we visit it the first time.  We can not
2665         create a proper Phi node, therefore a Phi0, i.e., a Phi without
2666         predecessors is returned.  This node is added to the linked list (field
2667         "link") of the containing block to be completed when this block is
2668         matured. (Completion will add a new Phi and turn the Phi0 into an Id
2671      2. The value is already known in this block, graph_arr[pos] is set and we
2672         visit the block the first time.  We can return the value without
2673         creating any new nodes.
2675      3. The block is mature and we visit it the first time.  A Phi node needs
2676         to be created (phi_merge).  If the Phi is not needed, as all it's
2677         operands are the same value reaching the block through different
2678         paths, it's optimized away and the value itself is returned.
2680      4. The block is mature, and we visit it the second time.  Now two
2681         subcases are possible:
2682         * The value was computed completely the last time we were here.  This
2683           is the case if there is no loop.  We can return the proper value.
2684         * The recursion that visited this node and set the flag did not
2685           return yet.  We are computing a value in a loop and need to
2686           break the recursion.  This case only happens if we visited
2687       the same block with phi_merge before, which inserted a Phi0.
2688       So we return the Phi0.
   /* The visited flag doubles as the recursion guard: a block carrying
      the current graph-visited counter has already been entered on this
      traversal. */
2691   /* case 4 -- already visited. */
2692   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2693     /* As phi_merge allocates a Phi0 this value is always defined. Here
2694      is the critical difference of the two algorithms. */
2695     assert(block->attr.block.graph_arr[pos]);
2696     return block->attr.block.graph_arr[pos];
2699   /* visited the first time */
2700   set_irn_visited(block, get_irg_visited(current_ir_graph));
2702   /* Get the local valid value */
2703   res = block->attr.block.graph_arr[pos];
2705   /* case 2 -- If the value is actually computed, return it. */
2706   if (res) { return res; };  /* (trailing ';' is a harmless empty statement) */
2708   if (block->attr.block.matured) { /* case 3 */
2710     /* The Phi has the same amount of ins as the corresponding block. */
2711     int ins = get_irn_arity(block);
2713     NEW_ARR_A (ir_node *, nin, ins);
2715     /* Phi merge collects the predecessors and then creates a node. */
2716     res = phi_merge (block, pos, mode, nin, ins);
2718   } else {  /* case 1 */
2719     /* The block is not mature, we don't know how many in's are needed.  A Phi
2720        with zero predecessors is created.  Such a Phi node is called Phi0
2721        node.  The Phi0 is then added to the list of Phi0 nodes in this block
2722        to be matured by mature_immBlock later.
2723        The Phi0 has to remember the pos of it's internal value.  If the real
2724        Phi is computed, pos is used to update the array with the local
2726     res = new_rd_Phi0 (current_ir_graph, block, mode);
2727     res->attr.phi0_pos = pos;
2728     res->link = block->link;  /* prepend Phi0 to the block's pending-Phi list */
2732   /* If we get here, the frontend missed a use-before-definition error */
2735     printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
2736     assert (mode->code >= irm_F && mode->code <= irm_P);
2737     res = new_rd_Const (NULL, current_ir_graph, block, mode,
2738                get_mode_null(mode));
2741   /* The local valid value is available now. */
2742   block->attr.block.graph_arr[pos] = res;
2747 #endif /* USE_FAST_PHI_CONSTRUCTION */
2749 /* ************************************************************************** */
2752 * Finalize a Block node, when all control flows are known.
2753 * Acceptable parameters are only Block nodes.
/* Finalizes a Block built with new_immBlock once all control-flow
   predecessors are known: fixes the backedge array, matures every Phi0
   placeholder queued on block->link via phi_merge, marks the block
   matured and re-optimizes it in place.
   NOTE(review): the local declarations (ins, nin, n, next) and several
   closing braces of this function lie outside this excerpt. */
2756 mature_immBlock (ir_node *block)
2762   assert (get_irn_opcode(block) == iro_Block);
2763   /* @@@ should be commented in
2764      assert (!get_Block_matured(block) && "Block already matured"); */
2766   if (!get_Block_matured(block)) {
       /* block->in[0] is reserved; the real predecessor count is len-1. */
2767     ins = ARR_LEN (block->in)-1;
2768     /* Fix block parameters */
2769     block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2771     /* An array for building the Phi nodes. */
2772     NEW_ARR_A (ir_node *, nin, ins);
2774     /* Traverse a chain of Phi nodes attached to this block and mature
2776     for (n = block->link; n; n=next) {
2777       inc_irg_visited(current_ir_graph);
2779       exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2782     block->attr.block.matured = 1;
2784     /* Now, as the block is a finished firm node, we can optimize it.
2785        Since other nodes have been allocated since the block was created
2786        we can not free the node on the obstack.  Therefore we have to call
2788        Unfortunately the optimization does not change a lot, as all allocated
2789        nodes refer to the unoptimized node.
2790        We can call _2, as global cse has no effect on blocks. */
2791     block = optimize_in_place_2(block);
2792     IRN_VRFY_IRG(block, current_ir_graph);
/* --- new_d_* constructors: build a node in the current block of
   current_ir_graph, attaching debug info <db>, by forwarding to the
   corresponding new_bd_* worker.
   NOTE(review): many argument-continuation lines and closing braces of
   these wrappers are elided in this excerpt. */
2797 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2799   return new_bd_Phi(db, current_ir_graph->current_block,
/* Const, Const_long, Const_type and SymConst* are placed in the START
   block, not the current block. */
2804 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2806   return new_bd_Const(db, current_ir_graph->start_block,
2811 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2813   return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2817 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, type *tp)
2819   return new_bd_Const_type(db, current_ir_graph->start_block,
2825 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2827   return new_bd_Id(db, current_ir_graph->current_block,
2832 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2834   return new_bd_Proj(db, current_ir_graph->current_block,
/* Creates the default (fall-through) Proj of a Cond node.  Note that it
   MUTATES the Cond: marks it fragmentary and records the default proj. */
2839 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2842   assert(arg->op == op_Cond);
2843   arg->attr.c.kind = fragmentary;
2844   arg->attr.c.default_proj = max_proj;
2845   res = new_Proj (arg, mode_X, max_proj);
2850 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2852   return new_bd_Conv(db, current_ir_graph->current_block,
2857 new_d_Cast (dbg_info *db, ir_node *op, type *to_tp)
2859   return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2863 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2865   return new_bd_Tuple(db, current_ir_graph->current_block,
2870 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2872   return new_bd_Add(db, current_ir_graph->current_block,
2877 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2879   return new_bd_Sub(db, current_ir_graph->current_block,
2885 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2887   return new_bd_Minus(db, current_ir_graph->current_block,
2892 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2894   return new_bd_Mul(db, current_ir_graph->current_block,
/* Allocates the fragmentary-value array on a freshly built exception-
   raising node <res>, but only while still in the building phase, only
   if the node was not optimized into a different op, and only if no
   array is present yet (CSE may have returned an existing node). */
2899  * allocate the frag array
2901 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2902   if (get_opt_precise_exc_context()) {
2903     if ((current_ir_graph->phase_state == phase_building) &&
2904         (get_irn_op(res) == op) && /* Could be optimized away. */
2905         !*frag_store) /* Could be a cse where the arr is already set. */ {
2906       *frag_store = new_frag_arr(res);
/* Quot/DivMod/Div/Mod: may raise (division by zero), hence pinned and
   given a frag array under PRECISE_EXC_CONTEXT. */
2913 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2916   res = new_bd_Quot (db, current_ir_graph->current_block,
2918   res->attr.except.pin_state = op_pin_state_pinned;
2919 #if PRECISE_EXC_CONTEXT
2920   allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
2927 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2930   res = new_bd_DivMod (db, current_ir_graph->current_block,
2932   res->attr.except.pin_state = op_pin_state_pinned;
2933 #if PRECISE_EXC_CONTEXT
2934   allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
2941 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2944   res = new_bd_Div (db, current_ir_graph->current_block,
2946   res->attr.except.pin_state = op_pin_state_pinned;
2947 #if PRECISE_EXC_CONTEXT
2948   allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
2955 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2958   res = new_bd_Mod (db, current_ir_graph->current_block,
2960   res->attr.except.pin_state = op_pin_state_pinned;
2961 #if PRECISE_EXC_CONTEXT
2962   allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
2969 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2971   return new_bd_And (db, current_ir_graph->current_block,
2976 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2978   return new_bd_Or (db, current_ir_graph->current_block,
2983 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2985   return new_bd_Eor (db, current_ir_graph->current_block,
2990 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
2992   return new_bd_Not (db, current_ir_graph->current_block,
2997 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
2999   return new_bd_Shl (db, current_ir_graph->current_block,
3004 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3006   return new_bd_Shr (db, current_ir_graph->current_block,
3011 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3013   return new_bd_Shrs (db, current_ir_graph->current_block,
3018 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3020   return new_bd_Rot (db, current_ir_graph->current_block,
3025 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3027   return new_bd_Abs (db, current_ir_graph->current_block,
3032 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3034   return new_bd_Cmp (db, current_ir_graph->current_block,
3039 new_d_Jmp (dbg_info *db)
3041   return new_bd_Jmp (db, current_ir_graph->current_block);
3045 new_d_IJmp (dbg_info *db, ir_node *tgt)
3047   return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3051 new_d_Cond (dbg_info *db, ir_node *c)
3053   return new_bd_Cond (db, current_ir_graph->current_block, c);
/* Call/Load/Store/Alloc may raise as well: they get a frag array under
   PRECISE_EXC_CONTEXT (stored in their op-specific attribute). */
3057 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3061   res = new_bd_Call (db, current_ir_graph->current_block,
3062              store, callee, arity, in, tp);
3063 #if PRECISE_EXC_CONTEXT
3064   allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
3071 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3073   return new_bd_Return (db, current_ir_graph->current_block,
3078 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3080   return new_bd_Raise (db, current_ir_graph->current_block,
3085 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3088   res = new_bd_Load (db, current_ir_graph->current_block,
3090 #if PRECISE_EXC_CONTEXT
3091   allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
3098 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3101   res = new_bd_Store (db, current_ir_graph->current_block,
3103 #if PRECISE_EXC_CONTEXT
3104   allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
3111 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, type *alloc_type,
3115   res = new_bd_Alloc (db, current_ir_graph->current_block,
3116              store, size, alloc_type, where);
3117 #if PRECISE_EXC_CONTEXT
3118   allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
3125 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3126     ir_node *size, type *free_type, where_alloc where)
3128   return new_bd_Free (db, current_ir_graph->current_block,
3129              store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (plain member access). */
3133 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3134 /* GL: objptr was called frame before.  Frame was a bad choice for the name
3135    as the operand could as well be a pointer to a dynamic object. */
3137   return new_bd_Sel (db, current_ir_graph->current_block,
3138              store, objptr, 0, NULL, ent);
3142 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3144   return new_bd_Sel (db, current_ir_graph->current_block,
3145              store, objptr, n_index, index, sel);
3149 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
3151   return (new_bd_InstOf (db, current_ir_graph->current_block,
3152                          store, objptr, ent));
3156 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, type *tp)
3158   return new_bd_SymConst_type (db, current_ir_graph->start_block,
3163 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3165   return new_bd_SymConst (db, current_ir_graph->start_block,
3170 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3172   return new_bd_Sync (db, current_ir_graph->current_block,
3179   return _new_d_Bad();
3183 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3185   return new_bd_Confirm (db, current_ir_graph->current_block,
3190 new_d_Unknown (ir_mode *m)
3192   return new_bd_Unknown(m);
3196 new_d_CallBegin (dbg_info *db, ir_node *call)
3199   res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3204 new_d_EndReg (dbg_info *db)
3207   res = new_bd_EndReg(db, current_ir_graph->current_block);
3212 new_d_EndExcept (dbg_info *db)
3215   res = new_bd_EndExcept(db, current_ir_graph->current_block);
3220 new_d_Break (dbg_info *db)
3222   return new_bd_Break (db, current_ir_graph->current_block);
3226 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3228   return new_bd_Filter (db, current_ir_graph->current_block,
3235   return _new_d_NoMem();
3239 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3240     ir_node *ir_true, ir_mode *mode) {
3241   return new_bd_Mux (db, current_ir_graph->current_block,
3242       sel, ir_false, ir_true, mode);
3245 /* ********************************************************************* */
3246 /* Comfortable interface with automatic Phi node construction. */
3247 /* (Also uses the constructors of the debug-info (new_d_*) interface, except new_Block.) */
3248 /* ********************************************************************* */
3250 /* Block construction */
3251 /* immature Block without predecessors */
/* Creates a new immature Block with a dynamic (initially empty)
   predecessor array, makes it the current block, and allocates the
   graph_arr used for automatic Phi construction.  Predecessors are
   added later with add_immBlock_pred; the block is finished with
   mature_immBlock.
   NOTE(review): the declaration of res, the final "return res;" and
   some closing braces lie outside this excerpt. */
3252 ir_node *new_d_immBlock (dbg_info *db) {
3255   assert(get_irg_phase_state (current_ir_graph) == phase_building);
3256   /* creates a new dynamic in-array as length of in is -1 */
3257   res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3258   current_ir_graph->current_block = res;
3259   res->attr.block.matured     = 0;
3260   res->attr.block.dead        = 0;
3261   /* res->attr.block.exc = exc_normal; */
3262   /* res->attr.block.handler_entry = 0; */
3263   res->attr.block.irg         = current_ir_graph;
3264   res->attr.block.backedge    = NULL;
3265   res->attr.block.in_cg       = NULL;
3266   res->attr.block.cg_backedge = NULL;
3267   set_Block_block_visited(res, 0);
3269   /* Create and initialize array for Phi-node construction. */
3270   res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3271                                          current_ir_graph->n_loc);
3272   memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3274   /* Immature block may not be optimized! */
3275   IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper: immature block without debug info. */
3281 new_immBlock (void) {
3282   return new_d_immBlock(NULL);
3285 /* add an edge to a jmp/control flow node */
3287 add_immBlock_pred (ir_node *block, ir_node *jmp)
3289   if (block->attr.block.matured) {
3290     assert(0 && "Error: Block already matured!\n");
3293   assert(jmp != NULL);
3294   ARR_APP1(ir_node *, block->in, jmp);
3298 /* changing the current block */
3300 set_cur_block (ir_node *target) {
3301   current_ir_graph->current_block = target;
3304 /* ************************ */
3305 /* parameter administration */
3307 /* get a value from the parameter array from the current block by its index */
/* --- Parameter/value administration during construction.
   graph_arr slot 0 holds the current store; local variable <pos> lives
   at graph_arr[pos + 1] (see set_value / set_store below).
   NOTE(review): return-type lines, some signatures and closing braces
   of these functions are elided in this excerpt. */
3309 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3311   assert(get_irg_phase_state (current_ir_graph) == phase_building);
3312   inc_irg_visited(current_ir_graph);
3314   return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3316 /* get a value from the parameter array from the current block by its index */
3318 get_value (int pos, ir_mode *mode)
3320   return get_d_value(NULL, pos, mode);
3323 /* set a value at position pos in the parameter array from the current block */
3325 set_value (int pos, ir_node *value)
3327   assert(get_irg_phase_state (current_ir_graph) == phase_building);
3328   assert(pos+1 < current_ir_graph->n_loc);
3329   current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3332 /* get the current store */
3336   assert(get_irg_phase_state (current_ir_graph) == phase_building);
3337   /* GL: one could call get_value instead */
3338   inc_irg_visited(current_ir_graph);
3339   return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3342 /* set the current store */
3344 set_store (ir_node *store)
3346   /* GL: one could call set_value instead */
3347   assert(get_irg_phase_state (current_ir_graph) == phase_building);
3348   current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep a node alive by attaching it to the End node of the graph. */
3352 keep_alive (ir_node *ka) {
3353   add_End_keepalive(current_ir_graph->end, ka);
3356 /* --- Useful access routines --- */
3357 /* Returns the current block of the current graph.  To set the current
3358    block use set_cur_block. */
3359 ir_node *get_cur_block(void) {
3360   return get_irg_current_block(current_ir_graph);
3363 /* Returns the frame type of the current graph */
3364 type *get_cur_frame_type(void) {
3365   return get_irg_frame_type(current_ir_graph);
3369 /* ********************************************************************* */
3372 /* call once for each run of the library */
/* Installs the language-dependent callback used to initialize local
   variables that are read before being defined.  Call once per run of
   the library. */
3374 init_cons(uninitialized_local_variable_func_t *func)
3376   default_initialize_local_variable = func;
3379 /* call for each graph */
3381 irg_finalize_cons (ir_graph *irg) {
3382   irg->phase_state = phase_high;
/* Finalizes the construction phase for every graph in the program and
   marks the whole program representation (irp) as high-level.
   Fix: removed a stray line-continuation '\' after the phase_state
   assignment, which spliced the following source line onto this
   statement. */
3386 irp_finalize_cons (void) {
3387   int i, n_irgs = get_irp_n_irgs();
3388   for (i = 0; i < n_irgs; i++) {
3389     irg_finalize_cons(get_irp_irg(i));
3391   irp->phase_state = phase_high;
/* --- Convenience constructors without debug information: each simply
   forwards to its new_d_* counterpart with a NULL dbg_info.
   NOTE(review): the closing brace of each one-line wrapper is elided
   in this excerpt. */
3397 ir_node *new_Block(int arity, ir_node **in) {
3398   return new_d_Block(NULL, arity, in);
3400 ir_node *new_Start  (void) {
3401   return new_d_Start(NULL);
3403 ir_node *new_End    (void) {
3404   return new_d_End(NULL);
3406 ir_node *new_Jmp    (void) {
3407   return new_d_Jmp(NULL);
3409 ir_node *new_IJmp   (ir_node *tgt) {
3410   return new_d_IJmp(NULL, tgt);
3412 ir_node *new_Cond   (ir_node *c) {
3413   return new_d_Cond(NULL, c);
3415 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3416   return new_d_Return(NULL, store, arity, in);
3418 ir_node *new_Raise  (ir_node *store, ir_node *obj) {
3419   return new_d_Raise(NULL, store, obj);
3421 ir_node *new_Const  (ir_mode *mode, tarval *con) {
3422   return new_d_Const(NULL, mode, con);
3425 ir_node *new_Const_long(ir_mode *mode, long value)
3427   return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
3430 ir_node *new_Const_type(tarval *con, type *tp) {
3431   return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3434 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3435   return new_d_SymConst(NULL, value, kind);
3437 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3438   return new_d_simpleSel(NULL, store, objptr, ent);
3440 ir_node *new_Sel    (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3442   return new_d_Sel(NULL, store, objptr, arity, in, ent);
3444 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
3445   return new_d_InstOf (NULL, store, objptr, ent);
3447 ir_node *new_Call   (ir_node *store, ir_node *callee, int arity, ir_node **in,
3449   return new_d_Call(NULL, store, callee, arity, in, tp);
3451 ir_node *new_Add    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3452   return new_d_Add(NULL, op1, op2, mode);
3454 ir_node *new_Sub    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3455   return new_d_Sub(NULL, op1, op2, mode);
3457 ir_node *new_Minus  (ir_node *op,  ir_mode *mode) {
3458   return new_d_Minus(NULL, op, mode);
3460 ir_node *new_Mul    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3461   return new_d_Mul(NULL, op1, op2, mode);
3463 ir_node *new_Quot   (ir_node *memop, ir_node *op1, ir_node *op2) {
3464   return new_d_Quot(NULL, memop, op1, op2);
3466 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3467   return new_d_DivMod(NULL, memop, op1, op2);
3469 ir_node *new_Div    (ir_node *memop, ir_node *op1, ir_node *op2) {
3470   return new_d_Div(NULL, memop, op1, op2);
3472 ir_node *new_Mod    (ir_node *memop, ir_node *op1, ir_node *op2) {
3473   return new_d_Mod(NULL, memop, op1, op2);
3475 ir_node *new_Abs    (ir_node *op, ir_mode *mode) {
3476   return new_d_Abs(NULL, op, mode);
3478 ir_node *new_And    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3479   return new_d_And(NULL, op1, op2, mode);
3481 ir_node *new_Or     (ir_node *op1, ir_node *op2, ir_mode *mode) {
3482   return new_d_Or(NULL, op1, op2, mode);
3484 ir_node *new_Eor    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3485   return new_d_Eor(NULL, op1, op2, mode);
3487 ir_node *new_Not    (ir_node *op,                ir_mode *mode) {
3488   return new_d_Not(NULL, op, mode);
3490 ir_node *new_Shl    (ir_node *op,  ir_node *k,   ir_mode *mode) {
3491   return new_d_Shl(NULL, op, k, mode);
3493 ir_node *new_Shr    (ir_node *op,  ir_node *k,   ir_mode *mode) {
3494   return new_d_Shr(NULL, op, k, mode);
3496 ir_node *new_Shrs   (ir_node *op,  ir_node *k,   ir_mode *mode) {
3497   return new_d_Shrs(NULL, op, k, mode);
3499 ir_node *new_Rot    (ir_node *op,  ir_node *k,   ir_mode *mode) {
3500   return new_d_Rot(NULL, op, k, mode);
3502 ir_node *new_Cmp    (ir_node *op1, ir_node *op2) {
3503   return new_d_Cmp(NULL, op1, op2);
3505 ir_node *new_Conv   (ir_node *op, ir_mode *mode) {
3506   return new_d_Conv(NULL, op, mode);
3508 ir_node *new_Cast   (ir_node *op, type *to_tp) {
3509   return new_d_Cast(NULL, op, to_tp);
3511 ir_node *new_Phi    (int arity, ir_node **in, ir_mode *mode) {
3512   return new_d_Phi(NULL, arity, in, mode);
3514 ir_node *new_Load   (ir_node *store, ir_node *addr, ir_mode *mode) {
3515   return new_d_Load(NULL, store, addr, mode);
3517 ir_node *new_Store  (ir_node *store, ir_node *addr, ir_node *val) {
3518   return new_d_Store(NULL, store, addr, val);
3520 ir_node *new_Alloc  (ir_node *store, ir_node *size, type *alloc_type,
3521                      where_alloc where) {
3522   return new_d_Alloc(NULL, store, size, alloc_type, where);
3524 ir_node *new_Free   (ir_node *store, ir_node *ptr, ir_node *size,
3525              type *free_type, where_alloc where) {
3526   return new_d_Free(NULL, store, ptr, size, free_type, where);
3528 ir_node *new_Sync   (int arity, ir_node **in) {
3529   return new_d_Sync(NULL, arity, in);
3531 ir_node *new_Proj   (ir_node *arg, ir_mode *mode, long proj) {
3532   return new_d_Proj(NULL, arg, mode, proj);
3534 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3535   return new_d_defaultProj(NULL, arg, max_proj);
3537 ir_node *new_Tuple  (int arity, ir_node **in) {
3538   return new_d_Tuple(NULL, arity, in);
3540 ir_node *new_Id     (ir_node *val, ir_mode *mode) {
3541   return new_d_Id(NULL, val, mode);
3543 ir_node *new_Bad    (void) {
3546 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3547   return new_d_Confirm (NULL, val, bound, cmp);
3549 ir_node *new_Unknown(ir_mode *m) {
3550   return new_d_Unknown(m);
3552 ir_node *new_CallBegin (ir_node *callee) {
3553   return new_d_CallBegin(NULL, callee);
3555 ir_node *new_EndReg (void) {
3556   return new_d_EndReg(NULL);
3558 ir_node *new_EndExcept (void) {
3559   return new_d_EndExcept(NULL);
3561 ir_node *new_Break  (void) {
3562   return new_d_Break(NULL);
3564 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3565   return new_d_Filter(NULL, arg, mode, proj);
3567 ir_node *new_NoMem  (void) {
3568   return new_d_NoMem();
3570 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3571   return new_d_Mux(NULL, sel, ir_false, ir_true, mode);