3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
new_bd_Block (dbg_info *db, int arity, ir_node **in)
  ir_graph *irg = current_ir_graph;

  /* A Block has no enclosing block of its own: the block predecessor
     slot of new_ir_node() is NULL here. */
  res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
  /* Fixed-arity blocks are mature from the start; they cannot take
     part in automatic Phi construction. */
  set_Block_matured(res, 1);
  set_Block_block_visited(res, 0);

  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.dead = 0;
  res->attr.block.irg = irg;
  /* One backedge flag per predecessor, for loop construction. */
  res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
  /* Interprocedural-view and extended-basic-block data start empty. */
  res->attr.block.in_cg = NULL;
  res->attr.block.cg_backedge = NULL;
  res->attr.block.extblk = NULL;

  IRN_VRFY_IRG(res, irg);
new_bd_Start (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;

  /* Start is a tuple (mode_T) with no predecessors; its results are
     accessed through Proj nodes. */
  res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = irg; */

  IRN_VRFY_IRG(res, irg);
new_bd_End (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;

  /* arity -1: the in-array is dynamic, so keep-alive edges can be
     added later via add_End_keepalive(). */
  res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);

  IRN_VRFY_IRG(res, irg);
/* Creates a Phi node with all predecessors. Calling this constructor
   is only allowed if the corresponding block is mature. */
new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
  ir_graph *irg = current_ir_graph;
  bool has_unknown = false;

  /* Don't assert that block matured: the use of this constructor is strongly
     restricted anyway; only check arity against the block when it is. */
  if ( get_Block_matured(block) )
    assert( get_irn_arity(block) == arity );

  res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);

  /* One backedge flag per predecessor, for loop construction. */
  res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

  /* Scan for Unknown predecessors: such Phis must not be optimized. */
  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {

  if (!has_unknown) res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(irg->end, res);
new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, type *tp)
  ir_graph *irg = current_ir_graph;

  /* Constants are always placed in the start block; the 'block'
     parameter is deliberately ignored. */
  res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
  res->attr.con.tv = con;
  set_Const_type(res, tp); /* Call method because of complex assertion. */
  res = optimize_node (res);
  /* optimize_node may CSE onto an existing Const; its type must match. */
  assert(get_Const_type(res) == tp);
  IRN_VRFY_IRG(res, irg);
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
  ir_graph *irg = current_ir_graph;

  /* NOTE: this constructor mutates its argument: the Cond is marked
     fragmentary and its default-case projection number is recorded
     before the Proj is built. */
  assert(arg->op == op_Cond);
  arg->attr.c.kind = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
303 new_bd_Minus (dbg_info *db, ir_node *block,
304 ir_node *op, ir_mode *mode)
307 ir_graph *irg = current_ir_graph;
309 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
310 res = optimize_node(res);
311 IRN_VRFY_IRG(res, irg);
316 new_bd_Mul (dbg_info *db, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
321 ir_graph *irg = current_ir_graph;
325 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
326 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
332 new_bd_Quot (dbg_info *db, ir_node *block,
333 ir_node *memop, ir_node *op1, ir_node *op2)
337 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_DivMod (dbg_info *db, ir_node *block,
350 ir_node *memop, ir_node *op1, ir_node *op2)
354 ir_graph *irg = current_ir_graph;
359 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
360 res = optimize_node(res);
361 IRN_VRFY_IRG(res, irg);
366 new_bd_Div (dbg_info *db, ir_node *block,
367 ir_node *memop, ir_node *op1, ir_node *op2)
371 ir_graph *irg = current_ir_graph;
376 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
377 res = optimize_node(res);
378 IRN_VRFY_IRG(res, irg);
383 new_bd_Mod (dbg_info *db, ir_node *block,
384 ir_node *memop, ir_node *op1, ir_node *op2)
388 ir_graph *irg = current_ir_graph;
393 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
394 res = optimize_node(res);
395 IRN_VRFY_IRG(res, irg);
400 new_bd_And (dbg_info *db, ir_node *block,
401 ir_node *op1, ir_node *op2, ir_mode *mode)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Or (dbg_info *db, ir_node *block,
417 ir_node *op1, ir_node *op2, ir_mode *mode)
421 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_bd_Eor (dbg_info *db, ir_node *block,
433 ir_node *op1, ir_node *op2, ir_mode *mode)
437 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
442 res = optimize_node (res);
443 IRN_VRFY_IRG(res, irg);
448 new_bd_Not (dbg_info *db, ir_node *block,
449 ir_node *op, ir_mode *mode)
452 ir_graph *irg = current_ir_graph;
454 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
455 res = optimize_node(res);
456 IRN_VRFY_IRG(res, irg);
461 new_bd_Shl (dbg_info *db, ir_node *block,
462 ir_node *op, ir_node *k, ir_mode *mode)
466 ir_graph *irg = current_ir_graph;
470 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_bd_Shr (dbg_info *db, ir_node *block,
478 ir_node *op, ir_node *k, ir_mode *mode)
482 ir_graph *irg = current_ir_graph;
486 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
493 new_bd_Shrs (dbg_info *db, ir_node *block,
494 ir_node *op, ir_node *k, ir_mode *mode)
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
503 res = optimize_node(res);
504 IRN_VRFY_IRG(res, irg);
509 new_bd_Rot (dbg_info *db, ir_node *block,
510 ir_node *op, ir_node *k, ir_mode *mode)
514 ir_graph *irg = current_ir_graph;
518 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
519 res = optimize_node(res);
520 IRN_VRFY_IRG(res, irg);
525 new_bd_Abs (dbg_info *db, ir_node *block,
526 ir_node *op, ir_mode *mode)
529 ir_graph *irg = current_ir_graph;
531 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
532 res = optimize_node (res);
533 IRN_VRFY_IRG(res, irg);
538 new_bd_Cmp (dbg_info *db, ir_node *block,
539 ir_node *op1, ir_node *op2)
543 ir_graph *irg = current_ir_graph;
548 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
555 new_bd_Jmp (dbg_info *db, ir_node *block)
558 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
561 res = optimize_node (res);
562 IRN_VRFY_IRG (res, irg);
new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
  ir_graph *irg = current_ir_graph;

  /* Indirect jump to the address computed by 'tgt'. */
  res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
  res = optimize_node (res);
  IRN_VRFY_IRG (res, irg);

  /* optimize_node may have folded the IJmp (e.g. to a plain Jmp). */
  if (get_irn_op(res) == op_IJmp) /* still an IJmp */
582 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
585 ir_graph *irg = current_ir_graph;
587 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
588 res->attr.c.kind = dense;
589 res->attr.c.default_proj = 0;
590 res->attr.c.pred = COND_JMP_PRED_NONE;
591 res = optimize_node (res);
592 IRN_VRFY_IRG(res, irg);
new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
        ir_node *callee, int arity, ir_node **in, type *tp)
  ir_graph *irg = current_ir_graph;

  /* Real in-array layout: [memory, callee, arg0 .. arg(arity-1)];
     the explicit arguments are copied behind the two fixed slots. */
  NEW_ARR_A(ir_node *, r_in, r_arity);
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

  res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);

  /* The call type must be a method type (or the unknown type). */
  assert((get_unknown_type() == tp) || is_Method_type(tp));
  set_Call_type(res, tp);
  res->attr.call.exc.pin_state = op_pin_state_pinned;
  res->attr.call.callee_arr = NULL;  /* no callee analysis yet */
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
623 new_bd_Return (dbg_info *db, ir_node *block,
624 ir_node *store, int arity, ir_node **in)
629 ir_graph *irg = current_ir_graph;
632 NEW_ARR_A (ir_node *, r_in, r_arity);
634 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
635 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, irg);
642 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
646 ir_graph *irg = current_ir_graph;
650 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
651 res = optimize_node(res);
652 IRN_VRFY_IRG(res, irg);
657 new_bd_Load (dbg_info *db, ir_node *block,
658 ir_node *store, ir_node *adr, ir_mode *mode)
662 ir_graph *irg = current_ir_graph;
666 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
667 res->attr.load.exc.pin_state = op_pin_state_pinned;
668 res->attr.load.load_mode = mode;
669 res->attr.load.volatility = volatility_non_volatile;
670 res = optimize_node(res);
671 IRN_VRFY_IRG(res, irg);
676 new_bd_Store (dbg_info *db, ir_node *block,
677 ir_node *store, ir_node *adr, ir_node *val)
681 ir_graph *irg = current_ir_graph;
686 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
687 res->attr.store.exc.pin_state = op_pin_state_pinned;
688 res->attr.store.volatility = volatility_non_volatile;
689 res = optimize_node(res);
690 IRN_VRFY_IRG(res, irg);
695 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
696 ir_node *size, type *alloc_type, where_alloc where)
700 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
705 res->attr.a.exc.pin_state = op_pin_state_pinned;
706 res->attr.a.where = where;
707 res->attr.a.type = alloc_type;
708 res = optimize_node(res);
709 IRN_VRFY_IRG(res, irg);
714 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
715 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
719 ir_graph *irg = current_ir_graph;
724 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
725 res->attr.f.where = where;
726 res->attr.f.type = free_type;
727 res = optimize_node(res);
728 IRN_VRFY_IRG(res, irg);
new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
       int arity, ir_node **in, entity *ent)
  ir_graph *irg = current_ir_graph;

  assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");

  /* Real in-array layout: [memory, objptr, index0 .. index(arity-1)]. */
  NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

  /*
   * FIXM: Sel's can select functions which should be of mode mode_P_code.
   */
  res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
  res->attr.s.ent = ent;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
           ir_node *objptr, type *ent)
  ir_graph *irg = current_ir_graph;

  NEW_ARR_A(ir_node *, r_in, r_arity);

  /* NOTE(review): the node is built with op_Sel, not op_InstOf --
     looks like a copy-paste leftover; confirm whether InstOf is meant
     to reuse the Sel opcode here before changing it. */
  res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
  res->attr.io.ent = ent;

  /* InstOf is deliberately not optimized (see commented-out call). */
  /* res = optimize(res); */
  IRN_VRFY_IRG(res, irg);
new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
                  symconst_kind symkind, type *tp) {
  ir_graph *irg = current_ir_graph;

  /* Address-valued SymConsts get a pointer mode; other kinds (size,
     type tag, ...) keep whatever mode was chosen above this point. */
  if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
    mode = mode_P_data;   /* FIXME: can be mode_P_code */

  res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);

  res->attr.i.num = symkind;
  res->attr.i.sym = value;

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
804 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
805 symconst_kind symkind)
807 ir_graph *irg = current_ir_graph;
809 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
814 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
817 ir_graph *irg = current_ir_graph;
819 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
820 res = optimize_node(res);
821 IRN_VRFY_IRG(res, irg);
826 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
828 ir_node *in[2], *res;
829 ir_graph *irg = current_ir_graph;
833 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
834 res->attr.confirm_cmp = cmp;
835 res = optimize_node (res);
836 IRN_VRFY_IRG(res, irg);
/* this function is often called with current_ir_graph unset */
new_bd_Unknown (ir_mode *m)
  ir_graph *irg = current_ir_graph;

  /* Unknown nodes carry no debug info and are always placed in the
     start block, regardless of the current block. */
  res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
  res = optimize_node(res);
853 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
857 ir_graph *irg = current_ir_graph;
859 in[0] = get_Call_ptr(call);
860 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
861 /* res->attr.callbegin.irg = irg; */
862 res->attr.callbegin.call = call;
863 res = optimize_node(res);
864 IRN_VRFY_IRG(res, irg);
869 new_bd_EndReg (dbg_info *db, ir_node *block)
872 ir_graph *irg = current_ir_graph;
874 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
876 IRN_VRFY_IRG(res, irg);
881 new_bd_EndExcept (dbg_info *db, ir_node *block)
884 ir_graph *irg = current_ir_graph;
886 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
887 irg->end_except = res;
888 IRN_VRFY_IRG (res, irg);
893 new_bd_Break (dbg_info *db, ir_node *block)
896 ir_graph *irg = current_ir_graph;
898 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
899 res = optimize_node(res);
900 IRN_VRFY_IRG(res, irg);
905 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
909 ir_graph *irg = current_ir_graph;
911 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
912 res->attr.filter.proj = proj;
913 res->attr.filter.in_cg = NULL;
914 res->attr.filter.backedge = NULL;
917 assert(get_Proj_pred(res));
918 assert(get_nodes_block(get_Proj_pred(res)));
920 res = optimize_node(res);
921 IRN_VRFY_IRG(res, irg);
926 new_bd_Mux (dbg_info *db, ir_node *block,
927 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
931 ir_graph *irg = current_ir_graph;
937 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
940 res = optimize_node(res);
941 IRN_VRFY_IRG(res, irg);
945 /* --------------------------------------------- */
946 /* private interfaces, for professional use only */
947 /* --------------------------------------------- */
949 /* Constructs a Block with a fixed number of predecessors.
950 Does not set current_block. Can not be used with automatic
951 Phi node construction. */
953 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
955 ir_graph *rem = current_ir_graph;
958 current_ir_graph = irg;
959 res = new_bd_Block (db, arity, in);
960 current_ir_graph = rem;
966 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
968 ir_graph *rem = current_ir_graph;
971 current_ir_graph = irg;
972 res = new_bd_Start (db, block);
973 current_ir_graph = rem;
979 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
982 ir_graph *rem = current_ir_graph;
984 current_ir_graph = rem;
985 res = new_bd_End (db, block);
986 current_ir_graph = rem;
991 /* Creates a Phi node with all predecessors. Calling this constructor
992 is only allowed if the corresponding block is mature. */
994 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
997 ir_graph *rem = current_ir_graph;
999 current_ir_graph = irg;
1000 res = new_bd_Phi (db, block,arity, in, mode);
1001 current_ir_graph = rem;
1007 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
1010 ir_graph *rem = current_ir_graph;
1012 current_ir_graph = irg;
1013 res = new_bd_Const_type (db, block, mode, con, tp);
1014 current_ir_graph = rem;
1020 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1023 ir_graph *rem = current_ir_graph;
1025 current_ir_graph = irg;
1026 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1027 current_ir_graph = rem;
1033 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1035 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1039 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_Id(db, block, val, mode);
1046 current_ir_graph = rem;
1052 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1056 ir_graph *rem = current_ir_graph;
1058 current_ir_graph = irg;
1059 res = new_bd_Proj(db, block, arg, mode, proj);
1060 current_ir_graph = rem;
1066 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1070 ir_graph *rem = current_ir_graph;
1072 current_ir_graph = irg;
1073 res = new_bd_defaultProj(db, block, arg, max_proj);
1074 current_ir_graph = rem;
1080 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1083 ir_graph *rem = current_ir_graph;
1085 current_ir_graph = irg;
1086 res = new_bd_Conv(db, block, op, mode);
1087 current_ir_graph = rem;
1093 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
1096 ir_graph *rem = current_ir_graph;
1098 current_ir_graph = irg;
1099 res = new_bd_Cast(db, block, op, to_tp);
1100 current_ir_graph = rem;
1106 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1109 ir_graph *rem = current_ir_graph;
1111 current_ir_graph = irg;
1112 res = new_bd_Tuple(db, block, arity, in);
1113 current_ir_graph = rem;
1119 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1120 ir_node *op1, ir_node *op2, ir_mode *mode)
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_Add(db, block, op1, op2, mode);
1127 current_ir_graph = rem;
1133 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1134 ir_node *op1, ir_node *op2, ir_mode *mode)
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Sub(db, block, op1, op2, mode);
1141 current_ir_graph = rem;
1147 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1148 ir_node *op, ir_mode *mode)
1151 ir_graph *rem = current_ir_graph;
1153 current_ir_graph = irg;
1154 res = new_bd_Minus(db, block, op, mode);
1155 current_ir_graph = rem;
1161 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1162 ir_node *op1, ir_node *op2, ir_mode *mode)
1165 ir_graph *rem = current_ir_graph;
1167 current_ir_graph = irg;
1168 res = new_bd_Mul(db, block, op1, op2, mode);
1169 current_ir_graph = rem;
1175 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1176 ir_node *memop, ir_node *op1, ir_node *op2)
1179 ir_graph *rem = current_ir_graph;
1181 current_ir_graph = irg;
1182 res = new_bd_Quot(db, block, memop, op1, op2);
1183 current_ir_graph = rem;
1189 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1190 ir_node *memop, ir_node *op1, ir_node *op2)
1193 ir_graph *rem = current_ir_graph;
1195 current_ir_graph = irg;
1196 res = new_bd_DivMod(db, block, memop, op1, op2);
1197 current_ir_graph = rem;
new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
        ir_node *memop, ir_node *op1, ir_node *op2)
  ir_graph *rem = current_ir_graph;

  /* Standard new_rd_* pattern: temporarily switch current_ir_graph to
     the target graph, delegate to the block-local constructor, restore. */
  current_ir_graph = irg;
  res = new_bd_Div (db, block, memop, op1, op2);
  current_ir_graph =rem;
1217 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1218 ir_node *memop, ir_node *op1, ir_node *op2)
1221 ir_graph *rem = current_ir_graph;
1223 current_ir_graph = irg;
1224 res = new_bd_Mod(db, block, memop, op1, op2);
1225 current_ir_graph = rem;
1231 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1232 ir_node *op1, ir_node *op2, ir_mode *mode)
1235 ir_graph *rem = current_ir_graph;
1237 current_ir_graph = irg;
1238 res = new_bd_And(db, block, op1, op2, mode);
1239 current_ir_graph = rem;
1245 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1246 ir_node *op1, ir_node *op2, ir_mode *mode)
1249 ir_graph *rem = current_ir_graph;
1251 current_ir_graph = irg;
1252 res = new_bd_Or(db, block, op1, op2, mode);
1253 current_ir_graph = rem;
1259 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1260 ir_node *op1, ir_node *op2, ir_mode *mode)
1263 ir_graph *rem = current_ir_graph;
1265 current_ir_graph = irg;
1266 res = new_bd_Eor(db, block, op1, op2, mode);
1267 current_ir_graph = rem;
1273 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1274 ir_node *op, ir_mode *mode)
1277 ir_graph *rem = current_ir_graph;
1279 current_ir_graph = irg;
1280 res = new_bd_Not(db, block, op, mode);
1281 current_ir_graph = rem;
1287 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1288 ir_node *op, ir_node *k, ir_mode *mode)
1291 ir_graph *rem = current_ir_graph;
1293 current_ir_graph = irg;
1294 res = new_bd_Shl (db, block, op, k, mode);
1295 current_ir_graph = rem;
1301 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1302 ir_node *op, ir_node *k, ir_mode *mode)
1305 ir_graph *rem = current_ir_graph;
1307 current_ir_graph = irg;
1308 res = new_bd_Shr(db, block, op, k, mode);
1309 current_ir_graph = rem;
1315 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1316 ir_node *op, ir_node *k, ir_mode *mode)
1319 ir_graph *rem = current_ir_graph;
1321 current_ir_graph = irg;
1322 res = new_bd_Shrs(db, block, op, k, mode);
1323 current_ir_graph = rem;
1329 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1330 ir_node *op, ir_node *k, ir_mode *mode)
1333 ir_graph *rem = current_ir_graph;
1335 current_ir_graph = irg;
1336 res = new_bd_Rot(db, block, op, k, mode);
1337 current_ir_graph = rem;
1343 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1344 ir_node *op, ir_mode *mode)
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Abs(db, block, op, mode);
1351 current_ir_graph = rem;
1357 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1358 ir_node *op1, ir_node *op2)
1361 ir_graph *rem = current_ir_graph;
1363 current_ir_graph = irg;
1364 res = new_bd_Cmp(db, block, op1, op2);
1365 current_ir_graph = rem;
1371 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1374 ir_graph *rem = current_ir_graph;
1376 current_ir_graph = irg;
1377 res = new_bd_Jmp(db, block);
1378 current_ir_graph = rem;
1384 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1387 ir_graph *rem = current_ir_graph;
1389 current_ir_graph = irg;
1390 res = new_bd_IJmp(db, block, tgt);
1391 current_ir_graph = rem;
1397 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1400 ir_graph *rem = current_ir_graph;
1402 current_ir_graph = irg;
1403 res = new_bd_Cond(db, block, c);
1404 current_ir_graph = rem;
1410 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1411 ir_node *callee, int arity, ir_node **in, type *tp)
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1418 current_ir_graph = rem;
1424 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1425 ir_node *store, int arity, ir_node **in)
1428 ir_graph *rem = current_ir_graph;
1430 current_ir_graph = irg;
1431 res = new_bd_Return(db, block, store, arity, in);
1432 current_ir_graph = rem;
1438 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1441 ir_graph *rem = current_ir_graph;
1443 current_ir_graph = irg;
1444 res = new_bd_Raise(db, block, store, obj);
1445 current_ir_graph = rem;
1451 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1452 ir_node *store, ir_node *adr, ir_mode *mode)
1455 ir_graph *rem = current_ir_graph;
1457 current_ir_graph = irg;
1458 res = new_bd_Load(db, block, store, adr, mode);
1459 current_ir_graph = rem;
1465 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1466 ir_node *store, ir_node *adr, ir_node *val)
1469 ir_graph *rem = current_ir_graph;
1471 current_ir_graph = irg;
1472 res = new_bd_Store(db, block, store, adr, val);
1473 current_ir_graph = rem;
1479 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1480 ir_node *size, type *alloc_type, where_alloc where)
1483 ir_graph *rem = current_ir_graph;
1485 current_ir_graph = irg;
1486 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1487 current_ir_graph = rem;
1493 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1494 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
1497 ir_graph *rem = current_ir_graph;
1499 current_ir_graph = irg;
1500 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1501 current_ir_graph = rem;
1507 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1508 int arity, ir_node **in, entity *ent)
1511 ir_graph *rem = current_ir_graph;
1513 current_ir_graph = irg;
1514 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1515 current_ir_graph = rem;
1521 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1522 ir_node *objptr, type *ent)
1525 ir_graph *rem = current_ir_graph;
1527 current_ir_graph = irg;
1528 res = new_bd_InstOf(db, block, store, objptr, ent);
1529 current_ir_graph = rem;
1535 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1536 symconst_kind symkind, type *tp)
1539 ir_graph *rem = current_ir_graph;
1541 current_ir_graph = irg;
1542 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1543 current_ir_graph = rem;
1549 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1550 symconst_kind symkind)
1552 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1556 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp)
1558 symconst_symbol sym = {(type *)symbol};
1559 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1562 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
1563 symconst_symbol sym = {(type *)symbol};
1564 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1567 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1568 symconst_symbol sym = {symbol};
1569 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1572 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1573 symconst_symbol sym = {symbol};
1574 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1578 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1581 ir_graph *rem = current_ir_graph;
1583 current_ir_graph = irg;
1584 res = new_bd_Sync(db, block, arity, in);
1585 current_ir_graph = rem;
/* Remote constructors with debug info (new_rd_*): build a node in an
   explicitly given graph, independent of current_ir_graph.  The common
   pattern visible below: save current_ir_graph in rem, install irg, call
   the corresponding basic (new_bd_*) constructor, restore rem.
   NOTE(review): closing braces / returns are elided by this listing. */

/* Bad node: the unique "bad" node of the graph (no debug info needed). */
1591 new_rd_Bad (ir_graph *irg)

/* Confirm node: asserts the relation 'val cmp bound' holds from here on. */
1597 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1600 ir_graph *rem = current_ir_graph;
1602 current_ir_graph = irg;
1603 res = new_bd_Confirm(db, block, val, bound, cmp);
1604 current_ir_graph = rem;

/* Unknown node: placeholder of mode m for an undefined value. */
1609 /* this function is often called with current_ir_graph unset */
1611 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1614 ir_graph *rem = current_ir_graph;
1616 current_ir_graph = irg;
1617 res = new_bd_Unknown(m);
1618 current_ir_graph = rem;

/* CallBegin node: interprocedural-view marker for a Call. */
1624 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1627 ir_graph *rem = current_ir_graph;
1629 current_ir_graph = irg;
1630 res = new_bd_CallBegin(db, block, call);
1631 current_ir_graph = rem;

/* EndReg node: built directly via new_ir_node with dynamic arity (-1). */
1637 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1641 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1643 IRN_VRFY_IRG(res, irg);

/* EndExcept node: like EndReg, additionally registered as irg->end_except. */
1648 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1652 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1653 irg->end_except = res;
1654 IRN_VRFY_IRG (res, irg);

/* Break node. */
1659 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1662 ir_graph *rem = current_ir_graph;
1664 current_ir_graph = irg;
1665 res = new_bd_Break(db, block);
1666 current_ir_graph = rem;

/* Filter node: interprocedural counterpart of Proj. */
1672 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1676 ir_graph *rem = current_ir_graph;
1678 current_ir_graph = irg;
1679 res = new_bd_Filter(db, block, arg, mode, proj);
1680 current_ir_graph = rem;

/* NoMem node: the unique "no memory" node of the graph. */
1686 new_rd_NoMem (ir_graph *irg) {

/* Mux node: sel ? ir_true : ir_false in the given mode. */
1691 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1692 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1695 ir_graph *rem = current_ir_graph;
1697 current_ir_graph = irg;
1698 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1699 current_ir_graph = rem;
/* Remote constructors WITHOUT debug info (new_r_*): each is a one-line
   wrapper that forwards to the matching new_rd_* constructor with a NULL
   dbg_info.  They build nodes in an explicitly given graph and do not
   touch current_ir_graph themselves.
   NOTE(review): the listing elides the closing brace of each wrapper. */
1705 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1706 return new_rd_Block(NULL, irg, arity, in);
1708 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1709 return new_rd_Start(NULL, irg, block);
1711 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1712 return new_rd_End(NULL, irg, block);
1714 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1715 return new_rd_Jmp(NULL, irg, block);
1717 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1718 return new_rd_IJmp(NULL, irg, block, tgt);
1720 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1721 return new_rd_Cond(NULL, irg, block, c);
1723 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1724 ir_node *store, int arity, ir_node **in) {
1725 return new_rd_Return(NULL, irg, block, store, arity, in);
1727 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1728 ir_node *store, ir_node *obj) {
1729 return new_rd_Raise(NULL, irg, block, store, obj);
1731 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1732 ir_mode *mode, tarval *con) {
1733 return new_rd_Const(NULL, irg, block, mode, con);
1736 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1737 ir_mode *mode, long value) {
1738 return new_rd_Const_long(NULL, irg, block, mode, value);
1741 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1742 ir_mode *mode, tarval *con, type *tp) {
1743 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1746 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1747 symconst_symbol value, symconst_kind symkind) {
1748 return new_rd_SymConst(NULL, irg, block, value, symkind);
1750 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1751 ir_node *objptr, int n_index, ir_node **index,
1753 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1755 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1757 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
1759 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1760 ir_node *callee, int arity, ir_node **in,
1762 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1764 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1765 ir_node *op1, ir_node *op2, ir_mode *mode) {
1766 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1768 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1769 ir_node *op1, ir_node *op2, ir_mode *mode) {
1770 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1772 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1773 ir_node *op, ir_mode *mode) {
1774 return new_rd_Minus(NULL, irg, block, op, mode);
1776 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1777 ir_node *op1, ir_node *op2, ir_mode *mode) {
1778 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1780 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1781 ir_node *memop, ir_node *op1, ir_node *op2) {
1782 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1784 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1785 ir_node *memop, ir_node *op1, ir_node *op2) {
1786 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1788 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1789 ir_node *memop, ir_node *op1, ir_node *op2) {
1790 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1792 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1793 ir_node *memop, ir_node *op1, ir_node *op2) {
1794 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1796 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1797 ir_node *op, ir_mode *mode) {
1798 return new_rd_Abs(NULL, irg, block, op, mode);
1800 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1801 ir_node *op1, ir_node *op2, ir_mode *mode) {
1802 return new_rd_And(NULL, irg, block, op1, op2, mode);
1804 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1805 ir_node *op1, ir_node *op2, ir_mode *mode) {
1806 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1808 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1809 ir_node *op1, ir_node *op2, ir_mode *mode) {
1810 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1812 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1813 ir_node *op, ir_mode *mode) {
1814 return new_rd_Not(NULL, irg, block, op, mode);
1816 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1817 ir_node *op1, ir_node *op2) {
1818 return new_rd_Cmp(NULL, irg, block, op1, op2);
1820 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1821 ir_node *op, ir_node *k, ir_mode *mode) {
1822 return new_rd_Shl(NULL, irg, block, op, k, mode);
1824 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1825 ir_node *op, ir_node *k, ir_mode *mode) {
1826 return new_rd_Shr(NULL, irg, block, op, k, mode);
1828 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1829 ir_node *op, ir_node *k, ir_mode *mode) {
1830 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1832 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1833 ir_node *op, ir_node *k, ir_mode *mode) {
1834 return new_rd_Rot(NULL, irg, block, op, k, mode);
1836 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1837 ir_node *op, ir_mode *mode) {
1838 return new_rd_Conv(NULL, irg, block, op, mode);
1840 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1841 return new_rd_Cast(NULL, irg, block, op, to_tp);
1843 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1844 ir_node **in, ir_mode *mode) {
1845 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1847 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1848 ir_node *store, ir_node *adr, ir_mode *mode) {
1849 return new_rd_Load(NULL, irg, block, store, adr, mode);
1851 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1852 ir_node *store, ir_node *adr, ir_node *val) {
1853 return new_rd_Store(NULL, irg, block, store, adr, val);
1855 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1856 ir_node *size, type *alloc_type, where_alloc where) {
1857 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1859 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1860 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1861 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1863 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1864 return new_rd_Sync(NULL, irg, block, arity, in);
1866 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1867 ir_mode *mode, long proj) {
1868 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1870 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1872 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1874 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1875 int arity, ir_node **in) {
1876 return new_rd_Tuple(NULL, irg, block, arity, in );
1878 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1879 ir_node *val, ir_mode *mode) {
1880 return new_rd_Id(NULL, irg, block, val, mode);
1882 ir_node *new_r_Bad (ir_graph *irg) {
1883 return new_rd_Bad(irg);
1885 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1886 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1888 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1889 return new_rd_Unknown(irg, m);
1891 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1892 return new_rd_CallBegin(NULL, irg, block, callee);
1894 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1895 return new_rd_EndReg(NULL, irg, block);
1897 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1898 return new_rd_EndExcept(NULL, irg, block);
1900 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1901 return new_rd_Break(NULL, irg, block);
1903 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1904 ir_mode *mode, long proj) {
1905 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1907 ir_node *new_r_NoMem (ir_graph *irg) {
1908 return new_rd_NoMem(irg);
1910 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1911 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1912 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1916 /** ********************/
1917 /** public interfaces */
1918 /** construction tools */
/* new_d_Start: public-interface constructor -- builds the Start node in the
   current block of current_ir_graph, runs the optimizer on it and verifies
   the result. */
1922 * - create a new Start node in the current block
1924 * @return s - pointer to the created Start node
1929 new_d_Start (dbg_info *db)
1933 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1934 op_Start, mode_T, 0, NULL);
1935 /* res->attr.start.irg = current_ir_graph; */
1937 res = optimize_node(res);
1938 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: builds the End node (mode_X, dynamic arity -1) in the current
   block of current_ir_graph, optimizes and verifies it. */
1943 new_d_End (dbg_info *db)
1946 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1947 op_End, mode_X, -1, NULL);
1948 res = optimize_node(res);
1949 IRN_VRFY_IRG(res, current_ir_graph);
1954 /* Constructs a Block with a fixed number of predecessors.
1955 Does set current_block. Can be used with automatic Phi
1956 node construction. */
1958 new_d_Block (dbg_info *db, int arity, ir_node **in)
1962 bool has_unknown = false;
1964 res = new_bd_Block(db, arity, in);
/* While the graph is being built, attach a zero-initialized graph_arr of
   n_loc slots: the per-block value table used by set_value/get_value for
   automatic Phi construction. */
1966 /* Create and initialize array for Phi-node construction. */
1967 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1968 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1969 current_ir_graph->n_loc);
1970 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; blocks with Unknown preds are not
   optimized here (see the has_unknown guard below). */
1973 for (i = arity-1; i >= 0; i--)
1974 if (get_irn_op(in[i]) == op_Unknown) {
1979 if (!has_unknown) res = optimize_node(res);
1980 current_ir_graph->current_block = res;
1982 IRN_VRFY_IRG(res, current_ir_graph);
1987 /* ***********************************************************************/
1988 /* Methods necessary for automatic Phi node creation */
1990 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1991 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1992 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1993 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1995 Call Graph: ( A ---> B == A "calls" B)
1997 get_value mature_immBlock
2005 get_r_value_internal |
2009 new_rd_Phi0 new_rd_Phi_in
2011 * *************************************************************************** */
2013 /** Creates a Phi node with 0 predecessors */
/* Phi0 is a placeholder created while a block is still immature; it is
   upgraded to a real Phi (or an Id) when the block is matured. */
2014 static INLINE ir_node *
2015 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2019 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2020 IRN_VRFY_IRG(res, irg);
2024 /* There are two implementations of the Phi node construction. The first
2025 is faster, but does not work for blocks with more than 2 predecessors.
2026 The second works always but is slower and causes more unnecessary Phi
2028 Select the implementations by the following preprocessor flag set in
2030 #if USE_FAST_PHI_CONSTRUCTION
2032 /* This is a stack used for allocating and deallocating nodes in
2033 new_rd_Phi_in. The original implementation used the obstack
2034 to model this stack, now it is explicit. This reduces side effects.
2036 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate an empty explicit Phi-in stack (malloc'ed struct, flexible
   array for the node pointers). */
2038 new_Phi_in_stack(void) {
2041 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2043 res->stack = NEW_ARR_F (ir_node *, 0);
/* Release the stack's node array (struct free elided in this listing --
   TODO confirm against full source). */
2050 free_Phi_in_stack(Phi_in_stack *s) {
2051 DEL_ARR_F(s->stack);
/* Return a Phi node to the per-graph free stack: append if the stack is
   full up to pos, otherwise overwrite the slot at pos; pos counts the
   number of reusable nodes. */
2055 free_to_Phi_in_stack(ir_node *phi) {
2056 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2057 current_ir_graph->Phi_in_stack->pos)
2058 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2060 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2062 (current_ir_graph->Phi_in_stack->pos)++;
/* Get a Phi node: either pop a previously freed one from the explicit
   Phi-in stack and re-initialize it, or allocate a fresh node.
   NOTE(review): the branch structure (if/else) between the "allocate" and
   "reuse" paths is partly elided by this listing. */
2065 static INLINE ir_node *
2066 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2067 int arity, ir_node **in) {
2069 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2070 int pos = current_ir_graph->Phi_in_stack->pos;
2074 /* We need to allocate a new node */
2075 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2076 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2078 /* reuse the old node and initialize it again. */
2081 assert (res->kind == k_ir_node);
2082 assert (res->op == op_Phi);
2086 assert (arity >= 0);
2087 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is the block pointer; data predecessors are copied to &in[1]. */
2088 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2090 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2092 (current_ir_graph->Phi_in_stack->pos)--;
2096 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2098 /* Creates a Phi node with a given, fixed array **in of predecessors.
2099 If the Phi node is unnecessary, as the same value reaches the block
2100 through all control flow paths, it is eliminated and the value
2101 returned directly. This constructor is only intended for use in
2102 the automatic Phi node generation triggered by get_value or mature.
2103 The implementation is quite tricky and depends on the fact, that
2104 the nodes are allocated on a stack:
2105 The in array contains predecessors and NULLs. The NULLs appear,
2106 if get_r_value_internal, that computed the predecessors, reached
2107 the same block on two paths. In this case the same value reaches
2108 this block on both paths, there is no definition in between. We need
2109 not allocate a Phi where these paths merge, but we have to communicate
2110 this fact to the caller. This happens by returning a pointer to the
2111 node the caller _will_ allocate. (Yes, we predict the address. We can
2112 do so because the nodes are allocated on the obstack.) The caller then
2113 finds a pointer to itself and, when this routine is called again,
2116 static INLINE ir_node *
2117 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2120 ir_node *res, *known;
2122 /* Allocate a new node on the obstack. This can return a node to
2123 which some of the pointers in the in-array already point.
2124 Attention: the constructor copies the in array, i.e., the later
2125 changes to the array in this routine do not affect the
2126 constructed node! If the in array contains NULLs, there will be
2127 missing predecessors in the returned node. Is this a possible
2128 internal state of the Phi node generation? */
2129 #if USE_EXPLICIT_PHI_IN_STACK
2130 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2132 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2133 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2136 /* The in-array can contain NULLs. These were returned by
2137 get_r_value_internal if it reached the same block/definition on a
2138 second path. The NULLs are replaced by the node itself to
2139 simplify the test in the next loop. */
2140 for (i = 0; i < ins; ++i) {
2145 /* This loop checks whether the Phi has more than one predecessor.
2146 If so, it is a real Phi node and we break the loop. Else the Phi
2147 node merges the same definition on several paths and therefore is
2149 for (i = 0; i < ins; ++i) {
2150 if (in[i] == res || in[i] == known)
2159 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Redundant Phi: give the node back (explicit stack) or pop it off the
   obstack, after telling the edge bookkeeping it is gone. */
2161 #if USE_EXPLICIT_PHI_IN_STACK
2162 free_to_Phi_in_stack(res);
2164 edges_node_deleted(res, current_ir_graph);
2165 obstack_free(current_ir_graph->obst, res);
2169 res = optimize_node (res);
2170 IRN_VRFY_IRG(res, irg);
2173 /* return the pointer to the Phi node. This node might be deallocated! */
2178 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2181 allocates and returns this node. The routine called to allocate the
2182 node might optimize it away and return a real value, or even a pointer
2183 to a deallocated Phi node on top of the obstack!
2184 This function is called with an in-array of proper size. **/
/* phi_merge (fast variant): collect, for local variable 'pos', the reaching
   definition from every predecessor block into nin[], then build the Phi
   via new_rd_Phi_in.  May return a pointer to a node that is not yet
   allocated (see the obstack-address trick documented above). */
2186 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2188 ir_node *prevBlock, *res;
2191 /* This loop goes to all predecessor blocks of the block the Phi node is in
2192 and there finds the operands of the Phi node by calling
2193 get_r_value_internal. */
2194 for (i = 1; i <= ins; ++i) {
2195 assert (block->in[i]);
2196 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2198 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2201 /* After collecting all predecessors into the array nin a new Phi node
2202 with these predecessors is created. This constructor contains an
2203 optimization: If all predecessors of the Phi node are identical it
2204 returns the only operand instead of a new Phi node. If the value
2205 passes two different control flow edges without being defined, and
2206 this is the second path treated, a pointer to the node that will be
2207 allocated for the first path (recursion) is returned. We already
2208 know the address of this node, as it is the next node to be allocated
2209 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2210 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2212 /* Now we know the value for "pos" and can enter it in the array with
2213 all known local variables. Attention: this might be a pointer to
2214 a node, that later will be allocated!!! See new_rd_Phi_in.
2215 If this is called in mature, after some set_value in the same block,
2216 the proper value must not be overwritten:
2218 get_value (makes Phi0, put's it into graph_arr)
2219 set_value (overwrites Phi0 in graph_arr)
2220 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2223 if (!block->attr.block.graph_arr[pos]) {
2224 block->attr.block.graph_arr[pos] = res;
2226 /* printf(" value already computed by %s\n",
2227 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2233 /* This function returns the last definition of a variable. In case
2234 this variable was last defined in a previous block, Phi nodes are
2235 inserted. If the part of the firm graph containing the definition
2236 is not yet constructed, a dummy Phi node is returned. */
2238 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2241 /* There are 4 cases to treat.
2243 1. The block is not mature and we visit it the first time. We can not
2244 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2245 predecessors is returned. This node is added to the linked list (field
2246 "link") of the containing block to be completed when this block is
2247 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2250 2. The value is already known in this block, graph_arr[pos] is set and we
2251 visit the block the first time. We can return the value without
2252 creating any new nodes.
2254 3. The block is mature and we visit it the first time. A Phi node needs
2255 to be created (phi_merge). If the Phi is not needed, as all it's
2256 operands are the same value reaching the block through different
2257 paths, it's optimized away and the value itself is returned.
2259 4. The block is mature, and we visit it the second time. Now two
2260 subcases are possible:
2261 * The value was computed completely the last time we were here. This
2262 is the case if there is no loop. We can return the proper value.
2263 * The recursion that visited this node and set the flag did not
2264 return yet. We are computing a value in a loop and need to
2265 break the recursion without knowing the result yet.
2266 @@@ strange case. Straight forward we would create a Phi before
2267 starting the computation of it's predecessors. In this case we will
2268 find a Phi here in any case. The problem is that this implementation
2269 only creates a Phi after computing the predecessors, so that it is
2270 hard to compute self references of this Phi. @@@
2271 There is no simple check for the second subcase. Therefore we check
2272 for a second visit and treat all such cases as the second subcase.
2273 Anyways, the basic situation is the same: we reached a block
2274 on two paths without finding a definition of the value: No Phi
2275 nodes are needed on both paths.
2276 We return this information "Two paths, no Phi needed" by a very tricky
2277 implementation that relies on the fact that an obstack is a stack and
2278 will return a node with the same address on different allocations.
2279 Look also at phi_merge and new_rd_phi_in to understand this.
2280 @@@ Unfortunately this does not work, see testprogram
2281 three_cfpred_example.
2285 /* case 4 -- already visited. */
2286 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2288 /* visited the first time */
2289 set_irn_visited(block, get_irg_visited(current_ir_graph));
2291 /* Get the local valid value */
2292 res = block->attr.block.graph_arr[pos];
2294 /* case 2 -- If the value is actually computed, return it. */
2295 if (res) return res;
2297 if (block->attr.block.matured) { /* case 3 */
2299 /* The Phi has the same amount of ins as the corresponding block. */
2300 int ins = get_irn_arity(block);
2302 NEW_ARR_A (ir_node *, nin, ins);
2304 /* Phi merge collects the predecessors and then creates a node. */
2305 res = phi_merge (block, pos, mode, nin, ins);
2307 } else { /* case 1 */
2308 /* The block is not mature, we don't know how many in's are needed. A Phi
2309 with zero predecessors is created. Such a Phi node is called Phi0
2310 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2311 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2313 The Phi0 has to remember the pos of it's internal value. If the real
2314 Phi is computed, pos is used to update the array with the local
2317 res = new_rd_Phi0 (current_ir_graph, block, mode);
2318 res->attr.phi0_pos = pos;
2319 res->link = block->link;
2323 /* If we get here, the frontend missed a use-before-definition error */
/* Fallback for use-before-definition: warn and substitute the mode's zero
   tarval as a constant so construction can continue. */
2326 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2327 assert (mode->code >= irm_F && mode->code <= irm_P);
2328 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2329 tarval_mode_null[mode->code]);
2332 /* The local valid value is available now. */
2333 block->attr.block.graph_arr[pos] = res;
2341 it starts the recursion. This causes an Id at the entry of
2342 every block that has no definition of the value! **/
2344 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack implementations for the slow Phi-construction variant: the
   explicit Phi-in stack is not used here, so these are no-ops. */
2346 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2347 void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in (slow/general variant): build a Phi with predecessors in[],
   eliminating it when all non-Bad predecessors are the same value.  The
   extra phi0 parameter is the placeholder this Phi will replace; self
   references through it are rewritten to the new node. */
2350 static INLINE ir_node *
2351 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2352 ir_node **in, int ins, ir_node *phi0)
2355 ir_node *res, *known;
2357 /* Allocate a new node on the obstack. The allocation copies the in
2359 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2360 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2362 /* This loop checks whether the Phi has more than one predecessor.
2363 If so, it is a real Phi node and we break the loop. Else the
2364 Phi node merges the same definition on several paths and therefore
2365 is not needed. Don't consider Bad nodes! */
2367 for (i=0; i < ins; ++i)
2371 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2373 /* Optimize self referencing Phis: We can't detect them yet properly, as
2374 they still refer to the Phi0 they will replace. So replace right now. */
2375 if (phi0 && in[i] == phi0) in[i] = res;
2377 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2385 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Redundant Phi: discard the freshly allocated node and fall back to the
   single known predecessor, re-optimizing it if it is itself a Phi. */
2388 edges_node_deleted(res, current_ir_graph);
2389 obstack_free (current_ir_graph->obst, res);
2390 if (is_Phi(known)) {
2391 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2392 order, an enclosing Phi node may get superfluous. */
2393 res = optimize_in_place_2(known);
2395 exchange(known, res);
2401 /* A undefined value, e.g., in unreachable code. */
2405 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2406 IRN_VRFY_IRG(res, irg);
2407 /* Memory Phis in endless loops must be kept alive.
2408 As we can't distinguish these easily we keep all of them alive. */
2409 if ((res->op == op_Phi) && (mode == mode_M))
2410 add_End_keepalive(irg->end, res);
2417 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2419 #if PRECISE_EXC_CONTEXT
2421 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2423 /* Construct a new frag_array for node n.
2424 Copy the content from the current graph_arr of the corresponding block:
2425 this is the current state.
2426 Set ProjM(n) as current memory state.
2427 Further the last entry in frag_arr of current block points to n. This
2428 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2430 static INLINE ir_node ** new_frag_arr (ir_node *n)
2435 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2436 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2437 sizeof(ir_node *)*current_ir_graph->n_loc);
2439 /* turn off optimization before allocating Proj nodes, as res isn't
2441 opt = get_opt_optimize(); set_optimize(0);
2442 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Calls use their dedicated exception-memory Proj; for all other fragile
   ops the asserts below establish that their memory Proj numbers agree,
   so pn_Alloc_M stands for any of them. */
2443 if (get_irn_op(n) == op_Call)
2444 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2446 assert((pn_Quot_M == pn_DivMod_M) &&
2447 (pn_Quot_M == pn_Div_M) &&
2448 (pn_Quot_M == pn_Mod_M) &&
2449 (pn_Quot_M == pn_Load_M) &&
2450 (pn_Quot_M == pn_Store_M) &&
2451 (pn_Quot_M == pn_Alloc_M) );
2452 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Reserved slot n_loc-1 links this block's most recent fragile op. */
2456 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2461 * returns the frag_arr from a node
/* Dispatch on opcode: each fragile op stores its frag_arr in a different
   attribute union member.  NOTE(review): the case labels themselves are
   elided by this listing -- the order suggests Call, Alloc, Load, Store,
   then a default/except case; confirm against the full source. */
2463 static INLINE ir_node **
2464 get_frag_arr (ir_node *n) {
2465 switch (get_irn_opcode(n)) {
2467 return n->attr.call.exc.frag_arr;
2469 return n->attr.a.exc.frag_arr;
2471 return n->attr.load.exc.frag_arr;
2473 return n->attr.store.exc.frag_arr;
2475 return n->attr.except.frag_arr;
/* set_frag_value: record val at slot pos in frag_arr unless already set,
   then follow the chain of fragile ops (linked through slot n_loc-1) and
   propagate the value into their frag arrays as well.
   NOTE(review): this listing appears to contain two variants merged --
   a recursive one (lines 2480-2486) and an iterative one with a bounded
   loop (lines 2491-2502); an #if/#else between them was likely elided.
   Confirm against the full source. */
2480 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2482 if (!frag_arr[pos]) frag_arr[pos] = val;
2483 if (frag_arr[current_ir_graph->n_loc - 1]) {
2484 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2485 assert(arr != frag_arr && "Endless recursion detected");
2486 set_frag_value(arr, pos, val);
/* Iterative variant: hard bound of 1000 chain steps guards against cycles. */
2491 for (i = 0; i < 1000; ++i) {
2492 if (!frag_arr[pos]) {
2493 frag_arr[pos] = val;
2495 if (frag_arr[current_ir_graph->n_loc - 1]) {
2496 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2502 assert(0 && "potential endless recursion");
/* get_r_frag_value_internal: like get_r_value_internal, but reads the value
   of 'pos' as seen AFTER the fragile op cfOp (precise exception context):
   first consult cfOp's frag array, then fall back to the block state. */
2507 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2511 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2513 frag_arr = get_frag_arr(cfOp);
2514 res = frag_arr[pos];
2516 if (block->attr.block.graph_arr[pos]) {
2517 /* There was a set_value after the cfOp and no get_value before that
2518 set_value. We must build a Phi node now. */
2519 if (block->attr.block.matured) {
2520 int ins = get_irn_arity(block);
2522 NEW_ARR_A (ir_node *, nin, ins);
2523 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, queued on the block's
   link list for mature_immBlock. */
2525 res = new_rd_Phi0 (current_ir_graph, block, mode);
2526 res->attr.phi0_pos = pos;
2527 res->link = block->link;
2531 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2532 but this should be better: (remove comment if this works) */
2533 /* It's a Phi, we can write this into all graph_arrs with NULL */
2534 set_frag_value(block->attr.block.graph_arr, pos, res);
2536 res = get_r_value_internal(block, pos, mode);
2537 set_frag_value(block->attr.block.graph_arr, pos, res);
/* phi_merge (general variant): */
2545 computes the predecessors for the real phi node, and then
2546 allocates and returns this node. The routine called to allocate the
2547 node might optimize it away and return a real value.
2548 This function must be called with an in-array of proper size. **/
2550 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2552 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2555 /* If this block has no value at pos create a Phi0 and remember it
2556 in graph_arr to break recursions.
2557 Else we may not set graph_arr as there a later value is remembered. */
2559 if (!block->attr.block.graph_arr[pos]) {
2560 if (block == get_irg_start_block(current_ir_graph)) {
2561 /* Collapsing to Bad tarvals is no good idea.
2562 So we call a user-supplied routine here that deals with this case as
2563 appropriate for the given language. Sorrily the only help we can give
2564 here is the position.
2566 Even if all variables are defined before use, it can happen that
2567 we get to the start block, if a Cond has been replaced by a tuple
2568 (bad, jmp). In this case we call the function needlessly, eventually
2569 generating a non existent error.
2570 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2573 if (default_initialize_local_variable)
2574 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2576 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2577 /* We don't need to care about exception ops in the start block.
2578 There are none by definition. */
2579 return block->attr.block.graph_arr[pos];
2581 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2582 block->attr.block.graph_arr[pos] = phi0;
2583 #if PRECISE_EXC_CONTEXT
2584 if (get_opt_precise_exc_context()) {
2585 /* Set graph_arr for fragile ops. Also here we should break recursion.
2586 We could choose a cyclic path through a cfop. But the recursion would
2587 break at some point. */
2588 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2594 /* This loop goes to all predecessor blocks of the block the Phi node
2595 is in and there finds the operands of the Phi node by calling
2596 get_r_value_internal. */
2597 for (i = 1; i <= ins; ++i) {
2598 prevCfOp = skip_Proj(block->in[i]);
2600 if (is_Bad(prevCfOp)) {
2601 /* In case a Cond has been optimized we would get right to the start block
2602 with an invalid definition. */
2603 nin[i-1] = new_Bad();
2606 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2608 if (!is_Bad(prevBlock)) {
2609 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: read the value as seen after that op. */
2610 if (get_opt_precise_exc_context() &&
2611 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2612 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2613 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2616 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2618 nin[i-1] = new_Bad();
2622 /* We want to pass the Phi0 node to the constructor: this finds additional
2623 optimization possibilities.
2624 The Phi0 node either is allocated in this function, or it comes from
2625 a former call to get_r_value_internal. In this case we may not yet
2626 exchange phi0, as this is done in mature_immBlock. */
/* Only pass a phi0_all that really is a zero-arity Phi of this block. */
2628 phi0_all = block->attr.block.graph_arr[pos];
2629 if (!((get_irn_op(phi0_all) == op_Phi) &&
2630 (get_irn_arity(phi0_all) == 0) &&
2631 (get_nodes_block(phi0_all) == block)))
2637 /* After collecting all predecessors into the array nin a new Phi node
2638 with these predecessors is created. This constructor contains an
2639 optimization: If all predecessors of the Phi node are identical it
2640 returns the only operand instead of a new Phi node. */
2641 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2643 /* In case we allocated a Phi0 node at the beginning of this procedure,
2644 we need to exchange this Phi0 with the real Phi. */
2646 exchange(phi0, res);
2647 block->attr.block.graph_arr[pos] = res;
2648 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2649 only an optimization. */
2655 /* This function returns the last definition of a variable. In case
2656 this variable was last defined in a previous block, Phi nodes are
2657 inserted. If the part of the firm graph containing the definition
2658 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): the embedded original line numbers jump (e.g. 2658->2660),
   so physical lines (the return-type line, several closing braces, the
   "else" of the use-before-def path) appear to be missing from this copy
   of the file — verify against upstream ircons.c before building. */
2660 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2663 /* There are 4 cases to treat.
2665 1. The block is not mature and we visit it the first time. We can not
2666 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2667 predecessors is returned. This node is added to the linked list (field
2668 "link") of the containing block to be completed when this block is
2669 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2672 2. The value is already known in this block, graph_arr[pos] is set and we
2673 visit the block the first time. We can return the value without
2674 creating any new nodes.
2676 3. The block is mature and we visit it the first time. A Phi node needs
2677 to be created (phi_merge). If the Phi is not needed, as all it's
2678 operands are the same value reaching the block through different
2679 paths, it's optimized away and the value itself is returned.
2681 4. The block is mature, and we visit it the second time. Now two
2682 subcases are possible:
2683 * The value was computed completely the last time we were here. This
2684 is the case if there is no loop. We can return the proper value.
2685 * The recursion that visited this node and set the flag did not
2686 return yet. We are computing a value in a loop and need to
2687 break the recursion. This case only happens if we visited
2688 the same block with phi_merge before, which inserted a Phi0.
2689 So we return the Phi0.
/* The per-graph visited flag doubles as the "second visit" marker: if it
   matches, an earlier recursion already stored something in graph_arr[pos]. */
2692 /* case 4 -- already visited. */
2693 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2694 /* As phi_merge allocates a Phi0 this value is always defined. Here
2695 is the critical difference of the two algorithms. */
2696 assert(block->attr.block.graph_arr[pos]);
2697 return block->attr.block.graph_arr[pos];
2700 /* visited the first time */
2701 set_irn_visited(block, get_irg_visited(current_ir_graph));
2703 /* Get the local valid value */
2704 res = block->attr.block.graph_arr[pos];
2706 /* case 2 -- If the value is actually computed, return it. */
2707 if (res) { return res; };
2709 if (block->attr.block.matured) { /* case 3 */
2711 /* The Phi has the same amount of ins as the corresponding block. */
2712 int ins = get_irn_arity(block);
2714 NEW_ARR_A (ir_node *, nin, ins);
2716 /* Phi merge collects the predecessors and then creates a node. */
2717 res = phi_merge (block, pos, mode, nin, ins);
2719 } else { /* case 1 */
2720 /* The block is not mature, we don't know how many in's are needed. A Phi
2721 with zero predecessors is created. Such a Phi node is called Phi0
2722 node. The Phi0 is then added to the list of Phi0 nodes in this block
2723 to be matured by mature_immBlock later.
2724 The Phi0 has to remember the pos of it's internal value. If the real
2725 Phi is computed, pos is used to update the array with the local
2727 res = new_rd_Phi0 (current_ir_graph, block, mode);
2728 res->attr.phi0_pos = pos;
/* Thread the Phi0 onto the block's singly linked list of pending Phi0s
   (consumed by mature_immBlock). */
2729 res->link = block->link;
2726 /* If we get here, the frontend missed a use-before-definition error */
2736 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2737 assert (mode->code >= irm_F && mode->code <= irm_P);
/* Best-effort recovery: fabricate a zero constant of the requested mode. */
2738 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2739 get_mode_null(mode));
2742 /* The local valid value is available now. */
2743 block->attr.block.graph_arr[pos] = res;
2748 #endif /* USE_FAST_PHI_CONSTRUCTION */
2750 /* ************************************************************************** */
2753 * Finalize a Block node, when all control flows are known.
2754 * Acceptable parameters are only Block nodes.
/* NOTE(review): local declarations (ins, nin, n, next) and several loop
   lines are missing here (embedded numbering jumps 2757->2763, 2778->2780);
   confirm against upstream before compiling. */
2757 mature_immBlock (ir_node *block)
2763 assert (get_irn_opcode(block) == iro_Block);
2764 /* @@@ should be commented in
2765 assert (!get_Block_matured(block) && "Block already matured"); */
2767 if (!get_Block_matured(block)) {
/* in[0] is the block's own slot in the dynamic array, so the real
   predecessor count is the array length minus one. */
2768 ins = ARR_LEN (block->in)-1;
2769 /* Fix block parameters */
2770 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2772 /* An array for building the Phi nodes. */
2773 NEW_ARR_A (ir_node *, nin, ins);
2775 /* Traverse a chain of Phi nodes attached to this block and mature
2777 for (n = block->link; n; n=next) {
2778 inc_irg_visited(current_ir_graph);
/* Replace each pending Phi0 by a fully merged Phi for its position. */
2780 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2783 block->attr.block.matured = 1;
2785 /* Now, as the block is a finished firm node, we can optimize it.
2786 Since other nodes have been allocated since the block was created
2787 we can not free the node on the obstack. Therefore we have to call
2789 Unfortunately the optimization does not change a lot, as all allocated
2790 nodes refer to the unoptimized node.
2791 We can call _2, as global cse has no effect on blocks. */
2792 block = optimize_in_place_2(block);
2793 IRN_VRFY_IRG(block, current_ir_graph);
/* --- Debug-info ('d') constructors: thin wrappers placing nodes in the
   current block of current_ir_graph.  NOTE(review): the continuation lines
   of most calls and the closing braces are missing from this copy of the
   file (numbering jumps, e.g. 2800->2805); verify against upstream. --- */
2798 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2800 return new_bd_Phi(db, current_ir_graph->current_block,
/* Constants live in the start block, not the current block. */
2805 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2807 return new_bd_Const(db, current_ir_graph->start_block,
2812 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2814 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2818 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, type *tp)
2820 return new_bd_Const_type(db, current_ir_graph->start_block,
2826 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2828 return new_bd_Id(db, current_ir_graph->current_block,
2833 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2835 return new_bd_Proj(db, current_ir_graph->current_block,
/* Marks the Cond as having a default case and projects it with mode_X. */
2840 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2843 assert(arg->op == op_Cond);
2844 arg->attr.c.kind = fragmentary;
2845 arg->attr.c.default_proj = max_proj;
2846 res = new_Proj (arg, mode_X, max_proj);
2851 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2853 return new_bd_Conv(db, current_ir_graph->current_block,
2858 new_d_Cast (dbg_info *db, ir_node *op, type *to_tp)
2860 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2864 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2866 return new_bd_Tuple(db, current_ir_graph->current_block,
2871 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2873 return new_bd_Add(db, current_ir_graph->current_block,
2878 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2880 return new_bd_Sub(db, current_ir_graph->current_block,
2886 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2888 return new_bd_Minus(db, current_ir_graph->current_block,
2893 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2895 return new_bd_Mul(db, current_ir_graph->current_block,
2900 * allocate the frag array
/* Lazily attaches a "fragment" array (per-exception memory state) to a
   fragile node while the graph is still being built.  Skipped if the node
   was optimized into a different op or already carries an array (CSE). */
2902 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2903 if (get_opt_precise_exc_context()) {
2904 if ((current_ir_graph->phase_state == phase_building) &&
2905 (get_irn_op(res) == op) && /* Could be optimized away. */
2906 !*frag_store) /* Could be a cse where the arr is already set. */ {
2907 *frag_store = new_frag_arr(res);
/* --- Fragile arithmetic (may raise division exceptions): each constructor
   pins the node and, under PRECISE_EXC_CONTEXT, attaches a frag array.
   NOTE(review): result declarations, call continuations, #endif lines and
   returns are missing from this copy (numbering jumps) — verify upstream. --- */
2914 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2917 res = new_bd_Quot (db, current_ir_graph->current_block,
2919 res->attr.except.pin_state = op_pin_state_pinned;
2920 #if PRECISE_EXC_CONTEXT
2921 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2928 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2931 res = new_bd_DivMod (db, current_ir_graph->current_block,
2933 res->attr.except.pin_state = op_pin_state_pinned;
2934 #if PRECISE_EXC_CONTEXT
2935 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2942 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2945 res = new_bd_Div (db, current_ir_graph->current_block,
2947 res->attr.except.pin_state = op_pin_state_pinned;
2948 #if PRECISE_EXC_CONTEXT
2949 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2956 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2959 res = new_bd_Mod (db, current_ir_graph->current_block,
2961 res->attr.except.pin_state = op_pin_state_pinned;
2962 #if PRECISE_EXC_CONTEXT
2963 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* --- Bitwise, shift, compare and control-flow 'd' constructors: pure
   delegation to the new_bd_* layer in the current block.  NOTE(review):
   argument continuation lines and closing braces are missing from this
   copy of the file — verify against upstream. --- */
2970 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2972 return new_bd_And (db, current_ir_graph->current_block,
2977 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2979 return new_bd_Or (db, current_ir_graph->current_block,
2984 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2986 return new_bd_Eor (db, current_ir_graph->current_block,
2991 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
2993 return new_bd_Not (db, current_ir_graph->current_block,
2998 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3000 return new_bd_Shl (db, current_ir_graph->current_block,
3005 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3007 return new_bd_Shr (db, current_ir_graph->current_block,
3012 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3014 return new_bd_Shrs (db, current_ir_graph->current_block,
3019 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3021 return new_bd_Rot (db, current_ir_graph->current_block,
3026 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3028 return new_bd_Abs (db, current_ir_graph->current_block,
3033 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3035 return new_bd_Cmp (db, current_ir_graph->current_block,
3040 new_d_Jmp (dbg_info *db)
3042 return new_bd_Jmp (db, current_ir_graph->current_block);
3046 new_d_IJmp (dbg_info *db, ir_node *tgt)
3048 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3052 new_d_Cond (dbg_info *db, ir_node *c)
3054 return new_bd_Cond (db, current_ir_graph->current_block, c);
/* --- Memory, call, selection and interprocedural 'd' constructors.
   Fragile ops (Call/Load/Store/Alloc) get a frag array under
   PRECISE_EXC_CONTEXT.  NOTE(review): many continuation lines, #endif
   lines, returns and closing braces are missing from this copy of the
   file (embedded numbering jumps) — verify against upstream ircons.c. --- */
3058 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3062 res = new_bd_Call (db, current_ir_graph->current_block,
3063 store, callee, arity, in, tp);
3064 #if PRECISE_EXC_CONTEXT
3065 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3072 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3074 return new_bd_Return (db, current_ir_graph->current_block,
3079 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3081 return new_bd_Raise (db, current_ir_graph->current_block,
3086 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3089 res = new_bd_Load (db, current_ir_graph->current_block,
3091 #if PRECISE_EXC_CONTEXT
3092 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3099 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3102 res = new_bd_Store (db, current_ir_graph->current_block,
3104 #if PRECISE_EXC_CONTEXT
3105 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3112 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, type *alloc_type,
3116 res = new_bd_Alloc (db, current_ir_graph->current_block,
3117 store, size, alloc_type, where);
3118 #if PRECISE_EXC_CONTEXT
3119 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3126 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3127 ir_node *size, type *free_type, where_alloc where)
3129 return new_bd_Free (db, current_ir_graph->current_block,
3130 store, ptr, size, free_type, where);
/* simpleSel is Sel with an empty index list (entity selection only). */
3134 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3135 /* GL: objptr was called frame before. Frame was a bad choice for the name
3136 as the operand could as well be a pointer to a dynamic object. */
3138 return new_bd_Sel (db, current_ir_graph->current_block,
3139 store, objptr, 0, NULL, ent);
3143 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3145 return new_bd_Sel (db, current_ir_graph->current_block,
3146 store, objptr, n_index, index, sel);
3150 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
3152 return (new_bd_InstOf (db, current_ir_graph->current_block,
3153 store, objptr, ent));
/* SymConsts, like Consts, are placed in the start block. */
3157 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, type *tp)
3159 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3164 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3166 return new_bd_SymConst (db, current_ir_graph->start_block,
3171 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3173 return new_bd_Sync (db, current_ir_graph->current_block,
/* presumably the body of new_d_Bad — its header line is missing here. */
3180 return _new_d_Bad();
3184 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3186 return new_bd_Confirm (db, current_ir_graph->current_block,
3191 new_d_Unknown (ir_mode *m)
3193 return new_bd_Unknown(m);
3197 new_d_CallBegin (dbg_info *db, ir_node *call)
3200 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3205 new_d_EndReg (dbg_info *db)
3208 res = new_bd_EndReg(db, current_ir_graph->current_block);
3213 new_d_EndExcept (dbg_info *db)
3216 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3221 new_d_Break (dbg_info *db)
3223 return new_bd_Break (db, current_ir_graph->current_block);
3227 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3229 return new_bd_Filter (db, current_ir_graph->current_block,
/* presumably the body of new_d_NoMem — its header line is missing here. */
3236 return _new_d_NoMem();
3240 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3241 ir_node *ir_true, ir_mode *mode) {
3242 return new_bd_Mux (db, current_ir_graph->current_block,
3243 sel, ir_false, ir_true, mode);
3246 /* ********************************************************************* */
3247 /* Comfortable interface with automatic Phi node construction. */
3248 /* (Uses also constructors of ?? interface, except new_Block.) */
3249 /* ********************************************************************* */
3251 /* Block construction */
3252 /* immature Block without predecessors */
/* Creates an immature block (predecessors added later via
   add_immBlock_pred, closed by mature_immBlock) and makes it current.
   NOTE(review): the declaration of 'res' and the return statement are
   missing from this copy of the file — verify against upstream. */
3253 ir_node *new_d_immBlock (dbg_info *db) {
3256 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3257 /* creates a new dynamic in-array as length of in is -1 */
3258 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3259 current_ir_graph->current_block = res;
3260 res->attr.block.matured = 0;
3261 res->attr.block.dead = 0;
3262 /* res->attr.block.exc = exc_normal; */
3263 /* res->attr.block.handler_entry = 0; */
3264 res->attr.block.irg = current_ir_graph;
3265 res->attr.block.backedge = NULL;
3266 res->attr.block.in_cg = NULL;
3267 res->attr.block.cg_backedge = NULL;
3268 set_Block_block_visited(res, 0);
3270 /* Create and initialize array for Phi-node construction. */
3271 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3272 current_ir_graph->n_loc);
3273 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3275 /* Immature block may not be optimized! */
3276 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper without debug info. */
3282 new_immBlock (void) {
3283 return new_d_immBlock(NULL);
3286 /* add an edge to a jmp/control flow node */
/* Appends one control-flow predecessor to an immature block; asserts
   (and otherwise silently refuses, in NDEBUG builds — the early return
   line is missing from this copy) when the block is already matured. */
3288 add_immBlock_pred (ir_node *block, ir_node *jmp)
3290 if (block->attr.block.matured) {
3291 assert(0 && "Error: Block already matured!\n");
3294 assert(jmp != NULL);
3295 ARR_APP1(ir_node *, block->in, jmp);
3299 /* changing the current block */
/* Makes 'target' the block into which subsequent nodes are placed. */
3301 set_cur_block (ir_node *target) {
3302 current_ir_graph->current_block = target;
3305 /* ************************ */
3306 /* parameter administration */
3308 /* get a value from the parameter array from the current block by its index */
/* Slot 0 of graph_arr holds the memory state, so user variables are
   shifted by one (pos + 1) throughout this section. */
3310 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3312 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3313 inc_irg_visited(current_ir_graph);
3315 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3317 /* get a value from the parameter array from the current block by its index */
3319 get_value (int pos, ir_mode *mode)
3321 return get_d_value(NULL, pos, mode);
3324 /* set a value at position pos in the parameter array from the current block */
3326 set_value (int pos, ir_node *value)
3328 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3329 assert(pos+1 < current_ir_graph->n_loc);
3330 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3333 /* get the current store */
3337 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3338 /* GL: one could call get_value instead */
3339 inc_irg_visited(current_ir_graph);
3340 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3343 /* set the current store */
3345 set_store (ir_node *store)
3347 /* GL: one could call set_value instead */
3348 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3349 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps 'ka' reachable by attaching it to the End node. */
3353 keep_alive (ir_node *ka) {
3354 add_End_keepalive(current_ir_graph->end, ka);
3357 /* --- Useful access routines --- */
3358 /* Returns the current block of the current graph. To set the current
3359 block use set_cur_block. */
3360 ir_node *get_cur_block(void) {
3361 return get_irg_current_block(current_ir_graph);
3364 /* Returns the frame type of the current graph */
3365 type *get_cur_frame_type(void) {
3366 return get_irg_frame_type(current_ir_graph);
3370 /* ********************************************************************* */
3373 /* call once for each run of the library */
/* Registers the frontend callback used to initialize local variables that
   are read before being written (see default_initialize_local_variable). */
3375 init_cons(uninitialized_local_variable_func_t *func)
3377 default_initialize_local_variable = func;
3380 /* call for each graph */
/* Ends the construction phase of a single graph. */
3382 irg_finalize_cons (ir_graph *irg) {
3383 irg->phase_state = phase_high;
3387 irp_finalize_cons (void) {
3388 int i, n_irgs = get_irp_n_irgs();
3389 for (i = 0; i < n_irgs; i++) {
3390 irg_finalize_cons(get_irp_irg(i));
3392 irp->phase_state = phase_high;\
/* --- Convenience constructors without debug info: each forwards to its
   new_d_* counterpart with a NULL dbg_info.  NOTE(review): most closing
   braces and a few bodies (e.g. new_Bad) are missing from this copy of
   the file (embedded numbering jumps) — verify against upstream. --- */
3398 ir_node *new_Block(int arity, ir_node **in) {
3399 return new_d_Block(NULL, arity, in);
3401 ir_node *new_Start (void) {
3402 return new_d_Start(NULL);
3404 ir_node *new_End (void) {
3405 return new_d_End(NULL);
3407 ir_node *new_Jmp (void) {
3408 return new_d_Jmp(NULL);
3410 ir_node *new_IJmp (ir_node *tgt) {
3411 return new_d_IJmp(NULL, tgt);
3413 ir_node *new_Cond (ir_node *c) {
3414 return new_d_Cond(NULL, c);
3416 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3417 return new_d_Return(NULL, store, arity, in);
3419 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3420 return new_d_Raise(NULL, store, obj);
3422 ir_node *new_Const (ir_mode *mode, tarval *con) {
3423 return new_d_Const(NULL, mode, con);
3426 ir_node *new_Const_long(ir_mode *mode, long value)
3428 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the type, then delegates. */
3431 ir_node *new_Const_type(tarval *con, type *tp) {
3432 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3435 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3436 return new_d_SymConst(NULL, value, kind);
3438 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3439 return new_d_simpleSel(NULL, store, objptr, ent);
3441 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3443 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3445 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
3446 return new_d_InstOf (NULL, store, objptr, ent);
3448 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3450 return new_d_Call(NULL, store, callee, arity, in, tp);
3452 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3453 return new_d_Add(NULL, op1, op2, mode);
3455 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3456 return new_d_Sub(NULL, op1, op2, mode);
3458 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3459 return new_d_Minus(NULL, op, mode);
3461 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3462 return new_d_Mul(NULL, op1, op2, mode);
3464 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3465 return new_d_Quot(NULL, memop, op1, op2);
3467 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3468 return new_d_DivMod(NULL, memop, op1, op2);
3470 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3471 return new_d_Div(NULL, memop, op1, op2);
3473 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3474 return new_d_Mod(NULL, memop, op1, op2);
3476 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3477 return new_d_Abs(NULL, op, mode);
3479 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3480 return new_d_And(NULL, op1, op2, mode);
3482 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3483 return new_d_Or(NULL, op1, op2, mode);
3485 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3486 return new_d_Eor(NULL, op1, op2, mode);
3488 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3489 return new_d_Not(NULL, op, mode);
3491 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3492 return new_d_Shl(NULL, op, k, mode);
3494 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3495 return new_d_Shr(NULL, op, k, mode);
3497 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3498 return new_d_Shrs(NULL, op, k, mode);
3500 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3501 return new_d_Rot(NULL, op, k, mode);
3503 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3504 return new_d_Cmp(NULL, op1, op2);
3506 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3507 return new_d_Conv(NULL, op, mode);
3509 ir_node *new_Cast (ir_node *op, type *to_tp) {
3510 return new_d_Cast(NULL, op, to_tp);
3512 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3513 return new_d_Phi(NULL, arity, in, mode);
3515 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3516 return new_d_Load(NULL, store, addr, mode);
3518 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3519 return new_d_Store(NULL, store, addr, val);
3521 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
3522 where_alloc where) {
3523 return new_d_Alloc(NULL, store, size, alloc_type, where);
3525 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3526 type *free_type, where_alloc where) {
3527 return new_d_Free(NULL, store, ptr, size, free_type, where);
3529 ir_node *new_Sync (int arity, ir_node **in) {
3530 return new_d_Sync(NULL, arity, in);
3532 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3533 return new_d_Proj(NULL, arg, mode, proj);
3535 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3536 return new_d_defaultProj(NULL, arg, max_proj);
3538 ir_node *new_Tuple (int arity, ir_node **in) {
3539 return new_d_Tuple(NULL, arity, in);
3541 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3542 return new_d_Id(NULL, val, mode);
/* Body missing here in this copy — presumably "return new_d_Bad();". */
3544 ir_node *new_Bad (void) {
3547 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3548 return new_d_Confirm (NULL, val, bound, cmp);
3550 ir_node *new_Unknown(ir_mode *m) {
3551 return new_d_Unknown(m);
3553 ir_node *new_CallBegin (ir_node *callee) {
3554 return new_d_CallBegin(NULL, callee);
3556 ir_node *new_EndReg (void) {
3557 return new_d_EndReg(NULL);
3559 ir_node *new_EndExcept (void) {
3560 return new_d_EndExcept(NULL);
3562 ir_node *new_Break (void) {
3563 return new_d_Break(NULL);
3565 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3566 return new_d_Filter(NULL, arg, mode, proj);
3568 ir_node *new_NoMem (void) {
3569 return new_d_NoMem();
3571 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3572 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);