3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
/* NOTE(review): the two definitions below are the two branches of a
   conditional whose #if lines are not visible in this excerpt -- one
   compiles node verification away entirely, the other forwards to
   irn_vrfy_irg().  Confirm the surrounding preprocessor condition. */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
/* Frontend-supplied hook; NULL means no language-specific handler is
   installed.  Presumably invoked for locals read before being written --
   confirm against the callers of this pointer. */
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
/* A Block is its own block, hence NULL is passed for the block argument. */
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All control-flow predecessors are known up front, so the block is
   marked mature immediately. */
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
/* Backedge flags are allocated on the graph's obstack, one per pred. */
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view fields start out unset. */
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
/* Constructs the Start node of a graph: no predecessors, result mode_T. */
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
/* Constructs the End node.  Arity -1 presumably requests a dynamic
   in-array so keep-alive edges can be appended later (see
   add_End_keepalive) -- confirm against new_ir_node. */
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
125 bool has_unknown = false;
127 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is mature, the Phi must have exactly one input per
   control-flow predecessor of the block. */
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown inputs; when one is found, optimization
   of the fresh Phi is suppressed below (optimizing it now would be
   premature -- the Unknowns may still be replaced). */
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146 As we can't distinguish these easily we keep all of them alive. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
/* Creates a Const node carrying tarval `con` with explicit type `tp`.
   NOTE(review): the node is always placed in the graph's start block;
   the `block` parameter is ignored -- presumably kept only for interface
   uniformity with the other constructors, confirm. */
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp); /* Call method because of complex assertion. */
161 res = optimize_node (res);
/* optimize_node may have returned an equivalent existing node; it must
   still carry the requested type. */
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const with the unknown type. */
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const built from a host `long`, converted to a
   tarval of the given mode. */
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
/* Creates the default Proj (mode_X, number max_proj) of a Cond.
   Side effect: mutates the Cond itself -- marks its selector kind as
   fragmentary and records max_proj as the default Proj number. */
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
303 new_bd_Minus (dbg_info *db, ir_node *block,
304 ir_node *op, ir_mode *mode)
307 ir_graph *irg = current_ir_graph;
309 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
310 res = optimize_node(res);
311 IRN_VRFY_IRG(res, irg);
316 new_bd_Mul (dbg_info *db, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
321 ir_graph *irg = current_ir_graph;
325 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
326 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
332 new_bd_Quot (dbg_info *db, ir_node *block,
333 ir_node *memop, ir_node *op1, ir_node *op2)
337 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_DivMod (dbg_info *db, ir_node *block,
350 ir_node *memop, ir_node *op1, ir_node *op2)
354 ir_graph *irg = current_ir_graph;
359 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
360 res = optimize_node(res);
361 IRN_VRFY_IRG(res, irg);
366 new_bd_Div (dbg_info *db, ir_node *block,
367 ir_node *memop, ir_node *op1, ir_node *op2)
371 ir_graph *irg = current_ir_graph;
376 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
377 res = optimize_node(res);
378 IRN_VRFY_IRG(res, irg);
383 new_bd_Mod (dbg_info *db, ir_node *block,
384 ir_node *memop, ir_node *op1, ir_node *op2)
388 ir_graph *irg = current_ir_graph;
393 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
394 res = optimize_node(res);
395 IRN_VRFY_IRG(res, irg);
400 new_bd_And (dbg_info *db, ir_node *block,
401 ir_node *op1, ir_node *op2, ir_mode *mode)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Or (dbg_info *db, ir_node *block,
417 ir_node *op1, ir_node *op2, ir_mode *mode)
421 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_bd_Eor (dbg_info *db, ir_node *block,
433 ir_node *op1, ir_node *op2, ir_mode *mode)
437 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
442 res = optimize_node (res);
443 IRN_VRFY_IRG(res, irg);
448 new_bd_Not (dbg_info *db, ir_node *block,
449 ir_node *op, ir_mode *mode)
452 ir_graph *irg = current_ir_graph;
454 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
455 res = optimize_node(res);
456 IRN_VRFY_IRG(res, irg);
461 new_bd_Shl (dbg_info *db, ir_node *block,
462 ir_node *op, ir_node *k, ir_mode *mode)
466 ir_graph *irg = current_ir_graph;
470 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_bd_Shr (dbg_info *db, ir_node *block,
478 ir_node *op, ir_node *k, ir_mode *mode)
482 ir_graph *irg = current_ir_graph;
486 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
493 new_bd_Shrs (dbg_info *db, ir_node *block,
494 ir_node *op, ir_node *k, ir_mode *mode)
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
503 res = optimize_node(res);
504 IRN_VRFY_IRG(res, irg);
509 new_bd_Rot (dbg_info *db, ir_node *block,
510 ir_node *op, ir_node *k, ir_mode *mode)
514 ir_graph *irg = current_ir_graph;
518 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
519 res = optimize_node(res);
520 IRN_VRFY_IRG(res, irg);
525 new_bd_Abs (dbg_info *db, ir_node *block,
526 ir_node *op, ir_mode *mode)
529 ir_graph *irg = current_ir_graph;
531 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
532 res = optimize_node (res);
533 IRN_VRFY_IRG(res, irg);
538 new_bd_Cmp (dbg_info *db, ir_node *block,
539 ir_node *op1, ir_node *op2)
543 ir_graph *irg = current_ir_graph;
548 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
555 new_bd_Jmp (dbg_info *db, ir_node *block)
558 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
561 res = optimize_node (res);
562 IRN_VRFY_IRG (res, irg);
567 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
570 ir_graph *irg = current_ir_graph;
572 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
573 res = optimize_node (res);
574 IRN_VRFY_IRG (res, irg);
576 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
582 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
585 ir_graph *irg = current_ir_graph;
587 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
588 res->attr.c.kind = dense;
589 res->attr.c.default_proj = 0;
590 res->attr.c.pred = COND_JMP_PRED_NONE;
591 res = optimize_node (res);
592 IRN_VRFY_IRG(res, irg);
/* Creates a Call node.  The real in-array is [store, callee, args...],
   assembled on the stack via NEW_ARR_A (alloca-backed). */
597 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
598 ir_node *callee, int arity, ir_node **in, type *tp)
603 ir_graph *irg = current_ir_graph;
606 NEW_ARR_A(ir_node *, r_in, r_arity);
/* Argument operands follow the two fixed slots (store, callee). */
609 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
611 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type or the unknown type. */
613 assert((get_unknown_type() == tp) || is_Method_type(tp));
614 set_Call_type(res, tp);
615 res->attr.call.exc.pin_state = op_pin_state_pinned;
/* No callee-analysis results yet. */
616 res->attr.call.callee_arr = NULL;
617 res = optimize_node(res);
618 IRN_VRFY_IRG(res, irg);
623 new_bd_Return (dbg_info *db, ir_node *block,
624 ir_node *store, int arity, ir_node **in)
629 ir_graph *irg = current_ir_graph;
632 NEW_ARR_A (ir_node *, r_in, r_arity);
634 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
635 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, irg);
642 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
646 ir_graph *irg = current_ir_graph;
650 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
651 res = optimize_node(res);
652 IRN_VRFY_IRG(res, irg);
657 new_bd_Load (dbg_info *db, ir_node *block,
658 ir_node *store, ir_node *adr, ir_mode *mode)
662 ir_graph *irg = current_ir_graph;
666 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
667 res->attr.load.exc.pin_state = op_pin_state_pinned;
668 res->attr.load.load_mode = mode;
669 res->attr.load.volatility = volatility_non_volatile;
670 res = optimize_node(res);
671 IRN_VRFY_IRG(res, irg);
676 new_bd_Store (dbg_info *db, ir_node *block,
677 ir_node *store, ir_node *adr, ir_node *val)
681 ir_graph *irg = current_ir_graph;
686 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
687 res->attr.store.exc.pin_state = op_pin_state_pinned;
688 res->attr.store.volatility = volatility_non_volatile;
689 res = optimize_node(res);
690 IRN_VRFY_IRG(res, irg);
695 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
696 ir_node *size, type *alloc_type, where_alloc where)
700 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
705 res->attr.a.exc.pin_state = op_pin_state_pinned;
706 res->attr.a.where = where;
707 res->attr.a.type = alloc_type;
708 res = optimize_node(res);
709 IRN_VRFY_IRG(res, irg);
714 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
715 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
719 ir_graph *irg = current_ir_graph;
724 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
725 res->attr.f.where = where;
726 res->attr.f.type = free_type;
727 res = optimize_node(res);
728 IRN_VRFY_IRG(res, irg);
/* Creates a Sel node selecting entity `ent`; the real in-array is
   [store, objptr, indices...], assembled on the stack. */
733 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
734 int arity, ir_node **in, entity *ent)
739 ir_graph *irg = current_ir_graph;
741 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
744 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
/* Index operands follow the two fixed slots (store, objptr). */
747 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
749 * FIXM: Sel's can select functions which should be of mode mode_P_code.
751 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
752 res->attr.s.ent = ent;
753 res = optimize_node(res);
754 IRN_VRFY_IRG(res, irg);
/* Creates an InstOf (runtime type test) node.
   NOTE(review): the node is built with op_Sel rather than a dedicated
   InstOf opcode, and the optimize call is deliberately commented out
   below -- looks intentional-but-odd; confirm against the opcode
   definitions before changing anything here. */
759 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
760 ir_node *objptr, type *ent)
765 ir_graph *irg = current_ir_graph;
768 NEW_ARR_A(ir_node *, r_in, r_arity);
772 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
773 res->attr.io.ent = ent;
775 /* res = optimize(res); */
776 IRN_VRFY_IRG(res, irg);
/* Creates a SymConst with an explicit type.  Address kinds get a data
   pointer mode; the mode chosen for the remaining kinds is set on lines
   not visible in this excerpt. */
781 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
782 symconst_kind symkind, type *tp) {
785 ir_graph *irg = current_ir_graph;
787 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
788 mode = mode_P_data; /* FIXME: can be mode_P_code */
792 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
794 res->attr.i.num = symkind;
795 res->attr.i.sym = value;
798 res = optimize_node(res);
799 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: SymConst with the unknown type. */
804 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
805 symconst_kind symkind)
807 ir_graph *irg = current_ir_graph;
809 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
814 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
817 ir_graph *irg = current_ir_graph;
819 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
820 res = optimize_node(res);
821 IRN_VRFY_IRG(res, irg);
826 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
828 ir_node *in[2], *res;
829 ir_graph *irg = current_ir_graph;
833 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
834 res->attr.confirm_cmp = cmp;
835 res = optimize_node (res);
836 IRN_VRFY_IRG(res, irg);
840 /* this function is often called with current_ir_graph unset */
/* Creates an Unknown node of mode m in the start block of the current
   graph; carries no debug info (NULL db).
   NOTE(review): despite the comment above, this dereferences
   current_ir_graph directly -- callers must have set it (the new_rd_
   wrapper does); confirm no caller reaches this with it unset. */
842 new_bd_Unknown (ir_mode *m)
845 ir_graph *irg = current_ir_graph;
847 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
848 res = optimize_node(res);
853 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
857 ir_graph *irg = current_ir_graph;
859 in[0] = get_Call_ptr(call);
860 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
861 /* res->attr.callbegin.irg = irg; */
862 res->attr.callbegin.call = call;
863 res = optimize_node(res);
864 IRN_VRFY_IRG(res, irg);
869 new_bd_EndReg (dbg_info *db, ir_node *block)
872 ir_graph *irg = current_ir_graph;
874 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
876 IRN_VRFY_IRG(res, irg);
881 new_bd_EndExcept (dbg_info *db, ir_node *block)
884 ir_graph *irg = current_ir_graph;
886 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
887 irg->end_except = res;
888 IRN_VRFY_IRG (res, irg);
893 new_bd_Break (dbg_info *db, ir_node *block)
896 ir_graph *irg = current_ir_graph;
898 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
899 res = optimize_node(res);
900 IRN_VRFY_IRG(res, irg);
905 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
909 ir_graph *irg = current_ir_graph;
911 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
912 res->attr.filter.proj = proj;
913 res->attr.filter.in_cg = NULL;
914 res->attr.filter.backedge = NULL;
917 assert(get_Proj_pred(res));
918 assert(get_nodes_block(get_Proj_pred(res)));
920 res = optimize_node(res);
921 IRN_VRFY_IRG(res, irg);
926 new_bd_Mux (dbg_info *db, ir_node *block,
927 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
931 ir_graph *irg = current_ir_graph;
937 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
940 res = optimize_node(res);
941 IRN_VRFY_IRG(res, irg);
945 /* --------------------------------------------- */
946 /* private interfaces, for professional use only */
947 /* --------------------------------------------- */
949 /* Constructs a Block with a fixed number of predecessors.
950 Does not set current_block. Can not be used with automatic
951 Phi node construction. */
953 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
955 ir_graph *rem = current_ir_graph;
958 current_ir_graph = irg;
959 res = new_bd_Block (db, arity, in);
960 current_ir_graph = rem;
966 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
968 ir_graph *rem = current_ir_graph;
971 current_ir_graph = irg;
972 res = new_bd_Start (db, block);
973 current_ir_graph = rem;
/* Constructs the End node of graph irg, delegating to new_bd_End under
   the standard save/switch/build/restore of current_ir_graph. */
979 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
982 ir_graph *rem = current_ir_graph;
/* BUG FIX: this line previously read `current_ir_graph = rem;` -- a
   no-op that left current_ir_graph unchanged, so the End node was built
   in whatever graph happened to be current instead of irg.  Every
   sibling new_rd_* wrapper switches to irg here before delegating. */
984 current_ir_graph = irg;
985 res = new_bd_End (db, block);
986 current_ir_graph = rem;
991 /* Creates a Phi node with all predecessors. Calling this constructor
992 is only allowed if the corresponding block is mature. */
994 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
997 ir_graph *rem = current_ir_graph;
999 current_ir_graph = irg;
1000 res = new_bd_Phi (db, block,arity, in, mode);
1001 current_ir_graph = rem;
1007 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
1010 ir_graph *rem = current_ir_graph;
1012 current_ir_graph = irg;
1013 res = new_bd_Const_type (db, block, mode, con, tp);
1014 current_ir_graph = rem;
1020 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1023 ir_graph *rem = current_ir_graph;
1025 current_ir_graph = irg;
1026 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1027 current_ir_graph = rem;
1033 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1035 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1039 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_Id(db, block, val, mode);
1046 current_ir_graph = rem;
1052 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1056 ir_graph *rem = current_ir_graph;
1058 current_ir_graph = irg;
1059 res = new_bd_Proj(db, block, arg, mode, proj);
1060 current_ir_graph = rem;
1066 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1070 ir_graph *rem = current_ir_graph;
1072 current_ir_graph = irg;
1073 res = new_bd_defaultProj(db, block, arg, max_proj);
1074 current_ir_graph = rem;
1080 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1083 ir_graph *rem = current_ir_graph;
1085 current_ir_graph = irg;
1086 res = new_bd_Conv(db, block, op, mode);
1087 current_ir_graph = rem;
1093 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
1096 ir_graph *rem = current_ir_graph;
1098 current_ir_graph = irg;
1099 res = new_bd_Cast(db, block, op, to_tp);
1100 current_ir_graph = rem;
1106 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1109 ir_graph *rem = current_ir_graph;
1111 current_ir_graph = irg;
1112 res = new_bd_Tuple(db, block, arity, in);
1113 current_ir_graph = rem;
1119 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1120 ir_node *op1, ir_node *op2, ir_mode *mode)
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_Add(db, block, op1, op2, mode);
1127 current_ir_graph = rem;
1133 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1134 ir_node *op1, ir_node *op2, ir_mode *mode)
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Sub(db, block, op1, op2, mode);
1141 current_ir_graph = rem;
1147 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1148 ir_node *op, ir_mode *mode)
1151 ir_graph *rem = current_ir_graph;
1153 current_ir_graph = irg;
1154 res = new_bd_Minus(db, block, op, mode);
1155 current_ir_graph = rem;
1161 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1162 ir_node *op1, ir_node *op2, ir_mode *mode)
1165 ir_graph *rem = current_ir_graph;
1167 current_ir_graph = irg;
1168 res = new_bd_Mul(db, block, op1, op2, mode);
1169 current_ir_graph = rem;
1175 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1176 ir_node *memop, ir_node *op1, ir_node *op2)
1179 ir_graph *rem = current_ir_graph;
1181 current_ir_graph = irg;
1182 res = new_bd_Quot(db, block, memop, op1, op2);
1183 current_ir_graph = rem;
1189 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1190 ir_node *memop, ir_node *op1, ir_node *op2)
1193 ir_graph *rem = current_ir_graph;
1195 current_ir_graph = irg;
1196 res = new_bd_DivMod(db, block, memop, op1, op2);
1197 current_ir_graph = rem;
1203 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1204 ir_node *memop, ir_node *op1, ir_node *op2)
1207 ir_graph *rem = current_ir_graph;
1209 current_ir_graph = irg;
1210 res = new_bd_Div (db, block, memop, op1, op2);
1211 current_ir_graph =rem;
1217 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1218 ir_node *memop, ir_node *op1, ir_node *op2)
1221 ir_graph *rem = current_ir_graph;
1223 current_ir_graph = irg;
1224 res = new_bd_Mod(db, block, memop, op1, op2);
1225 current_ir_graph = rem;
1231 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1232 ir_node *op1, ir_node *op2, ir_mode *mode)
1235 ir_graph *rem = current_ir_graph;
1237 current_ir_graph = irg;
1238 res = new_bd_And(db, block, op1, op2, mode);
1239 current_ir_graph = rem;
1245 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1246 ir_node *op1, ir_node *op2, ir_mode *mode)
1249 ir_graph *rem = current_ir_graph;
1251 current_ir_graph = irg;
1252 res = new_bd_Or(db, block, op1, op2, mode);
1253 current_ir_graph = rem;
1259 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1260 ir_node *op1, ir_node *op2, ir_mode *mode)
1263 ir_graph *rem = current_ir_graph;
1265 current_ir_graph = irg;
1266 res = new_bd_Eor(db, block, op1, op2, mode);
1267 current_ir_graph = rem;
1273 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1274 ir_node *op, ir_mode *mode)
1277 ir_graph *rem = current_ir_graph;
1279 current_ir_graph = irg;
1280 res = new_bd_Not(db, block, op, mode);
1281 current_ir_graph = rem;
1287 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1288 ir_node *op, ir_node *k, ir_mode *mode)
1291 ir_graph *rem = current_ir_graph;
1293 current_ir_graph = irg;
1294 res = new_bd_Shl (db, block, op, k, mode);
1295 current_ir_graph = rem;
1301 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1302 ir_node *op, ir_node *k, ir_mode *mode)
1305 ir_graph *rem = current_ir_graph;
1307 current_ir_graph = irg;
1308 res = new_bd_Shr(db, block, op, k, mode);
1309 current_ir_graph = rem;
1315 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1316 ir_node *op, ir_node *k, ir_mode *mode)
1319 ir_graph *rem = current_ir_graph;
1321 current_ir_graph = irg;
1322 res = new_bd_Shrs(db, block, op, k, mode);
1323 current_ir_graph = rem;
1329 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1330 ir_node *op, ir_node *k, ir_mode *mode)
1333 ir_graph *rem = current_ir_graph;
1335 current_ir_graph = irg;
1336 res = new_bd_Rot(db, block, op, k, mode);
1337 current_ir_graph = rem;
1343 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1344 ir_node *op, ir_mode *mode)
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Abs(db, block, op, mode);
1351 current_ir_graph = rem;
1357 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1358 ir_node *op1, ir_node *op2)
1361 ir_graph *rem = current_ir_graph;
1363 current_ir_graph = irg;
1364 res = new_bd_Cmp(db, block, op1, op2);
1365 current_ir_graph = rem;
1371 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1374 ir_graph *rem = current_ir_graph;
1376 current_ir_graph = irg;
1377 res = new_bd_Jmp(db, block);
1378 current_ir_graph = rem;
1384 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1387 ir_graph *rem = current_ir_graph;
1389 current_ir_graph = irg;
1390 res = new_bd_IJmp(db, block, tgt);
1391 current_ir_graph = rem;
1397 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1400 ir_graph *rem = current_ir_graph;
1402 current_ir_graph = irg;
1403 res = new_bd_Cond(db, block, c);
1404 current_ir_graph = rem;
1410 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1411 ir_node *callee, int arity, ir_node **in, type *tp)
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1418 current_ir_graph = rem;
1424 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1425 ir_node *store, int arity, ir_node **in)
1428 ir_graph *rem = current_ir_graph;
1430 current_ir_graph = irg;
1431 res = new_bd_Return(db, block, store, arity, in);
1432 current_ir_graph = rem;
1438 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1441 ir_graph *rem = current_ir_graph;
1443 current_ir_graph = irg;
1444 res = new_bd_Raise(db, block, store, obj);
1445 current_ir_graph = rem;
1451 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1452 ir_node *store, ir_node *adr, ir_mode *mode)
1455 ir_graph *rem = current_ir_graph;
1457 current_ir_graph = irg;
1458 res = new_bd_Load(db, block, store, adr, mode);
1459 current_ir_graph = rem;
1465 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1466 ir_node *store, ir_node *adr, ir_node *val)
1469 ir_graph *rem = current_ir_graph;
1471 current_ir_graph = irg;
1472 res = new_bd_Store(db, block, store, adr, val);
1473 current_ir_graph = rem;
1479 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1480 ir_node *size, type *alloc_type, where_alloc where)
1483 ir_graph *rem = current_ir_graph;
1485 current_ir_graph = irg;
1486 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1487 current_ir_graph = rem;
1493 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1494 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
1497 ir_graph *rem = current_ir_graph;
1499 current_ir_graph = irg;
1500 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1501 current_ir_graph = rem;
1507 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1508 int arity, ir_node **in, entity *ent)
1511 ir_graph *rem = current_ir_graph;
1513 current_ir_graph = irg;
1514 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1515 current_ir_graph = rem;
1521 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1522 ir_node *objptr, type *ent)
1525 ir_graph *rem = current_ir_graph;
1527 current_ir_graph = irg;
1528 res = new_bd_InstOf(db, block, store, objptr, ent);
1529 current_ir_graph = rem;
1535 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1536 symconst_kind symkind, type *tp)
1539 ir_graph *rem = current_ir_graph;
1541 current_ir_graph = irg;
1542 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1543 current_ir_graph = rem;
/* Convenience wrapper: SymConst with the unknown type. */
1549 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1550 symconst_kind symkind)
1552 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* The four helpers below build address/type/size SymConsts, always in
   the graph's start block.
   NOTE(review): the (type *) casts squeeze entity* / ident* pointers
   through the first member of symconst_symbol so a braced initializer
   compiles -- this assumes all union members share representation;
   confirm the symconst_symbol layout before changing. */
1556 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp)
1558 symconst_symbol sym = {(type *)symbol};
1559 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1562 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
1563 symconst_symbol sym = {(type *)symbol};
1564 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1567 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1568 symconst_symbol sym = {symbol};
1569 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1572 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1573 symconst_symbol sym = {symbol};
1574 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* -- Remote constructors (new_rd_*): build a node on an explicitly given
   graph irg instead of current_ir_graph.  Each constructor saves
   current_ir_graph, switches to irg so the corresponding new_bd_*
   constructor allocates on the right obstack, and restores the previous
   graph afterwards.
   NOTE(review): this chunk of the file is elided -- declarations,
   closing braces and return statements are not all visible here. */
/* Constructs a Sync node joining `arity` memory predecessors `in`. */
1578 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1581 ir_graph *rem = current_ir_graph;
1583 current_ir_graph = irg;
1584 res = new_bd_Sync(db, block, arity, in);
1585 current_ir_graph = rem;
/* Returns the graph's Bad node (body elided in this view). */
1591 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm node asserting `val` `cmp` `bound`. */
1597 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1600 ir_graph *rem = current_ir_graph;
1602 current_ir_graph = irg;
1603 res = new_bd_Confirm(db, block, val, bound, cmp);
1604 current_ir_graph = rem;
1609 /* this function is often called with current_ir_graph unset */
1611 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1614 ir_graph *rem = current_ir_graph;
1616 current_ir_graph = irg;
1617 res = new_bd_Unknown(m);
1618 current_ir_graph = rem;
/* Constructs a CallBegin node for interprocedural view of `call`. */
1624 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1627 ir_graph *rem = current_ir_graph;
1629 current_ir_graph = irg;
1630 res = new_bd_CallBegin(db, block, call);
1631 current_ir_graph = rem;
/* Constructs an EndReg node (interprocedural view).
   NOTE(review): the assignment of irg->end_reg is not visible here --
   presumably elided; compare new_rd_EndExcept below. */
1637 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1641 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1643 IRN_VRFY_IRG(res, irg);
/* Constructs an EndExcept node and registers it as irg->end_except. */
1648 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1652 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1653 irg->end_except = res;
1654 IRN_VRFY_IRG (res, irg);
/* Constructs a Break node (interprocedural view). */
1659 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1662 ir_graph *rem = current_ir_graph;
1664 current_ir_graph = irg;
1665 res = new_bd_Break(db, block);
1666 current_ir_graph = rem;
/* Constructs a Filter node selecting result `proj` of `arg`. */
1672 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1676 ir_graph *rem = current_ir_graph;
1678 current_ir_graph = irg;
1679 res = new_bd_Filter(db, block, arg, mode, proj);
1680 current_ir_graph = rem;
/* Returns the graph's NoMem node (body elided in this view). */
1686 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux node: sel ? ir_true : ir_false, result mode `mode`. */
1691 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1692 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1695 ir_graph *rem = current_ir_graph;
1697 current_ir_graph = irg;
1698 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1699 current_ir_graph = rem;
/* -- Convenience constructors (new_r_*): identical to the new_rd_*
   constructors but without debug info -- each simply forwards to its
   new_rd_* counterpart with a NULL dbg_info.
   NOTE(review): closing braces of these one-line wrappers are elided
   in this view of the file. */
1704 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1705 return new_rd_Block(NULL, irg, arity, in);
1707 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1708 return new_rd_Start(NULL, irg, block);
1710 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1711 return new_rd_End(NULL, irg, block);
1713 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1714 return new_rd_Jmp(NULL, irg, block);
1716 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1717 return new_rd_IJmp(NULL, irg, block, tgt);
1719 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1720 return new_rd_Cond(NULL, irg, block, c);
1722 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1723 ir_node *store, int arity, ir_node **in) {
1724 return new_rd_Return(NULL, irg, block, store, arity, in);
1726 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1727 ir_node *store, ir_node *obj) {
1728 return new_rd_Raise(NULL, irg, block, store, obj);
/* Constant constructors. */
1730 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1731 ir_mode *mode, tarval *con) {
1732 return new_rd_Const(NULL, irg, block, mode, con);
1735 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1736 ir_mode *mode, long value) {
1737 return new_rd_Const_long(NULL, irg, block, mode, value);
1740 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1741 ir_mode *mode, tarval *con, type *tp) {
1742 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1745 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1746 symconst_symbol value, symconst_kind symkind) {
1747 return new_rd_SymConst(NULL, irg, block, value, symkind);
1749 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1750 ir_node *objptr, int n_index, ir_node **index,
1752 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1754 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1756 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
1758 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1759 ir_node *callee, int arity, ir_node **in,
1761 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic and logic operations. */
1763 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1764 ir_node *op1, ir_node *op2, ir_mode *mode) {
1765 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1767 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1768 ir_node *op1, ir_node *op2, ir_mode *mode) {
1769 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1771 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1772 ir_node *op, ir_mode *mode) {
1773 return new_rd_Minus(NULL, irg, block, op, mode);
1775 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1776 ir_node *op1, ir_node *op2, ir_mode *mode) {
1777 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1779 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1780 ir_node *memop, ir_node *op1, ir_node *op2) {
1781 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1783 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1784 ir_node *memop, ir_node *op1, ir_node *op2) {
1785 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1787 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1788 ir_node *memop, ir_node *op1, ir_node *op2) {
1789 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1791 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1792 ir_node *memop, ir_node *op1, ir_node *op2) {
1793 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1795 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1796 ir_node *op, ir_mode *mode) {
1797 return new_rd_Abs(NULL, irg, block, op, mode);
1799 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1800 ir_node *op1, ir_node *op2, ir_mode *mode) {
1801 return new_rd_And(NULL, irg, block, op1, op2, mode);
1803 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1804 ir_node *op1, ir_node *op2, ir_mode *mode) {
1805 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1807 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1808 ir_node *op1, ir_node *op2, ir_mode *mode) {
1809 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1811 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1812 ir_node *op, ir_mode *mode) {
1813 return new_rd_Not(NULL, irg, block, op, mode);
1815 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1816 ir_node *op1, ir_node *op2) {
1817 return new_rd_Cmp(NULL, irg, block, op1, op2);
1819 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1820 ir_node *op, ir_node *k, ir_mode *mode) {
1821 return new_rd_Shl(NULL, irg, block, op, k, mode);
1823 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1824 ir_node *op, ir_node *k, ir_mode *mode) {
1825 return new_rd_Shr(NULL, irg, block, op, k, mode);
1827 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1828 ir_node *op, ir_node *k, ir_mode *mode) {
1829 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1831 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1832 ir_node *op, ir_node *k, ir_mode *mode) {
1833 return new_rd_Rot(NULL, irg, block, op, k, mode);
1835 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1836 ir_node *op, ir_mode *mode) {
1837 return new_rd_Conv(NULL, irg, block, op, mode);
1839 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1840 return new_rd_Cast(NULL, irg, block, op, to_tp);
1842 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1843 ir_node **in, ir_mode *mode) {
1844 return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* Memory operations. */
1846 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1847 ir_node *store, ir_node *adr, ir_mode *mode) {
1848 return new_rd_Load(NULL, irg, block, store, adr, mode);
1850 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1851 ir_node *store, ir_node *adr, ir_node *val) {
1852 return new_rd_Store(NULL, irg, block, store, adr, val);
1854 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1855 ir_node *size, type *alloc_type, where_alloc where) {
1856 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1858 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1859 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1860 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1862 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1863 return new_rd_Sync(NULL, irg, block, arity, in);
1865 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1866 ir_mode *mode, long proj) {
1867 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1869 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1871 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1873 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1874 int arity, ir_node **in) {
1875 return new_rd_Tuple(NULL, irg, block, arity, in );
1877 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1878 ir_node *val, ir_mode *mode) {
1879 return new_rd_Id(NULL, irg, block, val, mode);
1881 ir_node *new_r_Bad (ir_graph *irg) {
1882 return new_rd_Bad(irg);
1884 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1885 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1887 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1888 return new_rd_Unknown(irg, m);
/* Interprocedural-view constructors. */
1890 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1891 return new_rd_CallBegin(NULL, irg, block, callee);
1893 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1894 return new_rd_EndReg(NULL, irg, block);
1896 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1897 return new_rd_EndExcept(NULL, irg, block);
1899 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1900 return new_rd_Break(NULL, irg, block);
1902 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1903 ir_mode *mode, long proj) {
1904 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1906 ir_node *new_r_NoMem (ir_graph *irg) {
1907 return new_rd_NoMem(irg);
1909 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1910 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1911 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1914 /** ********************/
1915 /** public interfaces */
1916 /** construction tools */
1920 * - create a new Start node in the current block
1922 * @return s - pointer to the created Start node
1927 new_d_Start (dbg_info *db)
1931 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1932 op_Start, mode_T, 0, NULL);
1933 /* res->attr.start.irg = current_ir_graph; */
1935 res = optimize_node(res);
1936 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates the End node of current_ir_graph in the current block.
   mode_X, open-ended arity (-1): keep-alive edges are appended later. */
1941 new_d_End (dbg_info *db)
1944 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1945 op_End, mode_X, -1, NULL);
1946 res = optimize_node(res);
1947 IRN_VRFY_IRG(res, current_ir_graph);
1952 /* Constructs a Block with a fixed number of predecessors.
1953 Does set current_block. Can be used with automatic Phi
1954 node construction. */
1956 new_d_Block (dbg_info *db, int arity, ir_node **in)
1960 bool has_unknown = false;
1962 res = new_bd_Block(db, arity, in);
1964 /* Create and initialize array for Phi-node construction. */
1965 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1966 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1967 current_ir_graph->n_loc)#
1968 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized away yet --
   the Unknowns are replaced later. */
1971 for (i = arity-1; i >= 0; i--)
1972 if (get_irn_op(in[i]) == op_Unknown) {
1977 if (!has_unknown) res = optimize_node(res);
1978 current_ir_graph->current_block = res;
1980 IRN_VRFY_IRG(res, current_ir_graph);
1985 /* ***********************************************************************/
1986 /* Methods necessary for automatic Phi node creation */
1988 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1989 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1990 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1991 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1993 Call Graph: ( A ---> B == A "calls" B)
1995 get_value mature_immBlock
2003 get_r_value_internal |
2007 new_rd_Phi0 new_rd_Phi_in
2009 * *************************************************************************** */
2011 /** Creates a Phi node with 0 predecessors */
/* A Phi0 is a placeholder used during construction of immature blocks;
   it is upgraded to a real Phi (or an Id) when the block is matured.
   NOTE(review): the return statement is elided in this view. */
2012 static INLINE ir_node *
2013 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2017 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2018 IRN_VRFY_IRG(res, irg);
2022 /* There are two implementations of the Phi node construction. The first
2023 is faster, but does not work for blocks with more than 2 predecessors.
2024 The second works always but is slower and causes more unnecessary Phi
2026 Select the implementations by the following preprocessor flag set in
2028 #if USE_FAST_PHI_CONSTRUCTION
2030 /* This is a stack used for allocating and deallocating nodes in
2031 new_rd_Phi_in. The original implementation used the obstack
2032 to model this stack, now it is explicit. This reduces side effects.
2034 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi-in stack (flexible array of ir_node*). */
2036 new_Phi_in_stack(void) {
2039 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2041 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's backing array (and presumably the struct --
   elided here). */
2048 free_Phi_in_stack(Phi_in_stack *s) {
2049 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the free stack for later reuse;
   grows the array only when pos has reached its current length. */
2053 free_to_Phi_in_stack(ir_node *phi) {
2054 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2055 current_ir_graph->Phi_in_stack->pos)
2056 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2058 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2060 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: pops one from the free stack if available,
   otherwise allocates a fresh node on the graph's obstack. */
2063 static INLINE ir_node *
2064 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2065 int arity, ir_node **in) {
2067 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2068 int pos = current_ir_graph->Phi_in_stack->pos;
2072 /* We need to allocate a new node */
/* NOTE(review): 'db' is not declared among this function's visible
   parameters or locals -- likely hidden by the elision, or NULL was
   intended here; verify against the full source. */
2073 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2074 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2076 /* reuse the old node and initialize it again. */
2079 assert (res->kind == k_ir_node);
2080 assert (res->op == op_Phi);
2084 assert (arity >= 0);
2085 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2086 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
/* in[0] is the block; copy the real predecessors behind it. */
2088 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2090 (current_ir_graph->Phi_in_stack->pos)--;
2094 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2096 /* Creates a Phi node with a given, fixed array **in of predecessors.
2097 If the Phi node is unnecessary, as the same value reaches the block
2098 through all control flow paths, it is eliminated and the value
2099 returned directly. This constructor is only intended for use in
2100 the automatic Phi node generation triggered by get_value or mature.
2101 The implementation is quite tricky and depends on the fact, that
2102 the nodes are allocated on a stack:
2103 The in array contains predecessors and NULLs. The NULLs appear,
2104 if get_r_value_internal, that computed the predecessors, reached
2105 the same block on two paths. In this case the same value reaches
2106 this block on both paths, there is no definition in between. We need
2107 not allocate a Phi where these paths merge, but we have to communicate
2108 this fact to the caller. This happens by returning a pointer to the
2109 node the caller _will_ allocate. (Yes, we predict the address. We can
2110 do so because the nodes are allocated on the obstack.) The caller then
2111 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant of new_rd_Phi_in: builds a Phi from `in`
   (which may contain NULLs), and eliminates it when it merges only a
   single definition.  Relies on the obstack returning the same address
   for the speculative allocation on both paths -- see the big comment
   above. */
2114 static INLINE ir_node *
2115 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2118 ir_node *res, *known;
2120 /* Allocate a new node on the obstack. This can return a node to
2121 which some of the pointers in the in-array already point.
2122 Attention: the constructor copies the in array, i.e., the later
2123 changes to the array in this routine do not affect the
2124 constructed node! If the in array contains NULLs, there will be
2125 missing predecessors in the returned node. Is this a possible
2126 internal state of the Phi node generation? */
2127 #if USE_EXPLICIT_PHI_IN_STACK
2128 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2130 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2131 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2134 /* The in-array can contain NULLs. These were returned by
2135 get_r_value_internal if it reached the same block/definition on a
2136 second path. The NULLs are replaced by the node itself to
2137 simplify the test in the next loop. */
2138 for (i = 0; i < ins; ++i) {
2143 /* This loop checks whether the Phi has more than one predecessor.
2144 If so, it is a real Phi node and we break the loop. Else the Phi
2145 node merges the same definition on several paths and therefore is
2147 for (i = 0; i < ins; ++i) {
2148 if (in[i] == res || in[i] == known)
2157 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2159 #if USE_EXPLICIT_PHI_IN_STACK
2160 free_to_Phi_in_stack(res);
/* Without the explicit stack: roll the obstack back past the node. */
2162 edges_node_deleted(res, current_ir_graph);
2163 obstack_free(current_ir_graph->obst, res);
2167 res = optimize_node (res);
2168 IRN_VRFY_IRG(res, irg);
2171 /* return the pointer to the Phi node. This node might be deallocated! */
2176 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2179 allocates and returns this node. The routine called to allocate the
2180 node might optimize it away and return a real value, or even a pointer
2181 to a deallocated Phi node on top of the obstack!
2182 This function is called with an in-array of proper size. **/
/* Fast-construction variant of phi_merge: collects the value at `pos`
   from every predecessor block into `nin`, then builds (or elides) the
   Phi via new_rd_Phi_in.  `nin` must already have `ins` slots. */
2184 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2186 ir_node *prevBlock, *res;
2189 /* This loop goes to all predecessor blocks of the block the Phi node is in
2190 and there finds the operands of the Phi node by calling
2191 get_r_value_internal. */
2192 for (i = 1; i <= ins; ++i) {
2193 assert (block->in[i]);
2194 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2196 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2199 /* After collecting all predecessors into the array nin a new Phi node
2200 with these predecessors is created. This constructor contains an
2201 optimization: If all predecessors of the Phi node are identical it
2202 returns the only operand instead of a new Phi node. If the value
2203 passes two different control flow edges without being defined, and
2204 this is the second path treated, a pointer to the node that will be
2205 allocated for the first path (recursion) is returned. We already
2206 know the address of this node, as it is the next node to be allocated
2207 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2208 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2210 /* Now we know the value for "pos" and can enter it in the array with
2211 all known local variables. Attention: this might be a pointer to
2212 a node, that later will be allocated!!! See new_rd_Phi_in.
2213 If this is called in mature, after some set_value in the same block,
2214 the proper value must not be overwritten:
2216 get_value (makes Phi0, puts it into graph_arr)
2217 set_value (overwrites Phi0 in graph_arr)
2218 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2221 if (!block->attr.block.graph_arr[pos]) {
2222 block->attr.block.graph_arr[pos] = res;
2224 /* printf(" value already computed by %s\n",
2225 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2231 /* This function returns the last definition of a variable. In case
2232 this variable was last defined in a previous block, Phi nodes are
2233 inserted. If the part of the firm graph containing the definition
2234 is not yet constructed, a dummy Phi node is returned. */
2236 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2239 /* There are 4 cases to treat.
2241 1. The block is not mature and we visit it the first time. We can not
2242 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2243 predecessors is returned. This node is added to the linked list (field
2244 "link") of the containing block to be completed when this block is
2245 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2248 2. The value is already known in this block, graph_arr[pos] is set and we
2249 visit the block the first time. We can return the value without
2250 creating any new nodes.
2252 3. The block is mature and we visit it the first time. A Phi node needs
2253 to be created (phi_merge). If the Phi is not needed, as all its
2254 operands are the same value reaching the block through different
2255 paths, it's optimized away and the value itself is returned.
2257 4. The block is mature, and we visit it the second time. Now two
2258 subcases are possible:
2259 * The value was computed completely the last time we were here. This
2260 is the case if there is no loop. We can return the proper value.
2261 * The recursion that visited this node and set the flag did not
2262 return yet. We are computing a value in a loop and need to
2263 break the recursion without knowing the result yet.
2264 @@@ strange case. Straight forward we would create a Phi before
2265 starting the computation of its predecessors. In this case we will
2266 find a Phi here in any case. The problem is that this implementation
2267 only creates a Phi after computing the predecessors, so that it is
2268 hard to compute self references of this Phi. @@@
2269 There is no simple check for the second subcase. Therefore we check
2270 for a second visit and treat all such cases as the second subcase.
2271 Anyways, the basic situation is the same: we reached a block
2272 on two paths without finding a definition of the value: No Phi
2273 nodes are needed on both paths.
2274 We return this information "Two paths, no Phi needed" by a very tricky
2275 implementation that relies on the fact that an obstack is a stack and
2276 will return a node with the same address on different allocations.
2277 Look also at phi_merge and new_rd_phi_in to understand this.
2278 @@@ Unfortunately this does not work, see testprogram
2279 three_cfpred_example.
2283 /* case 4 -- already visited. */
2284 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2286 /* visited the first time */
2287 set_irn_visited(block, get_irg_visited(current_ir_graph));
2289 /* Get the local valid value */
2290 res = block->attr.block.graph_arr[pos];
2292 /* case 2 -- If the value is actually computed, return it. */
2293 if (res) return res;
2295 if (block->attr.block.matured) { /* case 3 */
2297 /* The Phi has the same amount of ins as the corresponding block. */
2298 int ins = get_irn_arity(block);
2300 NEW_ARR_A (ir_node *, nin, ins);
2302 /* Phi merge collects the predecessors and then creates a node. */
2303 res = phi_merge (block, pos, mode, nin, ins);
2305 } else { /* case 1 */
2306 /* The block is not mature, we don't know how many in's are needed. A Phi
2307 with zero predecessors is created. Such a Phi node is called Phi0
2308 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2309 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2311 The Phi0 has to remember the pos of its internal value. If the real
2312 Phi is computed, pos is used to update the array with the local
2315 res = new_rd_Phi0 (current_ir_graph, block, mode);
2316 res->attr.phi0_pos = pos;
2317 res->link = block->link;
2321 /* If we get here, the frontend missed a use-before-definition error */
2324 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2325 assert (mode->code >= irm_F && mode->code <= irm_P);
2326 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2327 tarval_mode_null[mode->code]);
2330 /* The local valid value is available now. */
2331 block->attr.block.graph_arr[pos] = res;
2339 it starts the recursion. This causes an Id at the entry of
2340 every block that has no definition of the value! **/
2342 #if USE_EXPLICIT_PHI_IN_STACK
2344 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2345 void free_Phi_in_stack(Phi_in_stack *s) { }
/* General (slow) variant of new_rd_Phi_in: additionally takes `phi0`,
   the placeholder Phi0 the new node will replace, so self-references
   through the Phi0 can be folded already during construction. */
2348 static INLINE ir_node *
2349 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2350 ir_node **in, int ins, ir_node *phi0)
2353 ir_node *res, *known;
2355 /* Allocate a new node on the obstack. The allocation copies the in
2357 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2358 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2360 /* This loop checks whether the Phi has more than one predecessor.
2361 If so, it is a real Phi node and we break the loop. Else the
2362 Phi node merges the same definition on several paths and therefore
2363 is not needed. Don't consider Bad nodes! */
2365 for (i=0; i < ins; ++i)
2369 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2371 /* Optimize self referencing Phis: We can't detect them yet properly, as
2372 they still refer to the Phi0 they will replace. So replace right now. */
2373 if (phi0 && in[i] == phi0) in[i] = res;
2375 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2383 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the speculative Phi (obstack rollback) and use the single
   known predecessor instead. */
2386 edges_node_deleted(res, current_ir_graph);
2387 obstack_free (current_ir_graph->obst, res);
2388 if (is_Phi(known)) {
2389 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2390 order, an enclosing Phi node may get superfluous. */
2391 res = optimize_in_place_2(known);
2393 exchange(known, res);
2399 /* An undefined value, e.g., in unreachable code. */
2403 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2404 IRN_VRFY_IRG(res, irg);
2405 /* Memory Phis in endless loops must be kept alive.
2406 As we can't distinguish these easily we keep all of them alive. */
2407 if ((res->op == op_Phi) && (mode == mode_M))
2408 add_End_keepalive(irg->end, res);
2415 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2417 #if PRECISE_EXC_CONTEXT
2419 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2421 /* Construct a new frag_array for node n.
2422 Copy the content from the current graph_arr of the corresponding block:
2423 this is the current state.
2424 Set ProjM(n) as current memory state.
2425 Further the last entry in frag_arr of current block points to n. This
2426 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2428 static INLINE ir_node ** new_frag_arr (ir_node *n)
2433 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2434 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2435 sizeof(ir_node *)*current_ir_graph->n_loc);
2437 /* turn off optimization before allocating Proj nodes, as res isn't
2439 opt = get_opt_optimize(); set_optimize(0);
2440 /* Here we rely on the fact that all frag ops have Memory as first result! */
2441 if (get_irn_op(n) == op_Call)
2442 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* For all other fragile ops the memory Proj number is identical, which
   the following asserts verify at compile/run time. */
2444 assert((pn_Quot_M == pn_DivMod_M) &&
2445 (pn_Quot_M == pn_Div_M) &&
2446 (pn_Quot_M == pn_Mod_M) &&
2447 (pn_Quot_M == pn_Load_M) &&
2448 (pn_Quot_M == pn_Store_M) &&
2449 (pn_Quot_M == pn_Alloc_M) );
2450 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* The block's last graph_arr slot links to n, forming the frag-op chain. */
2454 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2459 * returns the frag_arr from a node
/* Dispatches on the opcode because the frag_arr lives in a different
   attribute union member per fragile op (Call/Alloc/Load/Store/other). */
2461 static INLINE ir_node **
2462 get_frag_arr (ir_node *n) {
2463 switch (get_irn_opcode(n)) {
2465 return n->attr.call.exc.frag_arr;
2467 return n->attr.a.exc.frag_arr;
2469 return n->attr.load.exc.frag_arr;
2471 return n->attr.store.exc.frag_arr;
2473 return n->attr.except.frag_arr;
/* Writes `val` at `pos` into `frag_arr` unless already set, then follows
   the frag-op chain (last slot links to the next fragile op) so the value
   propagates to all later frag arrays.
   NOTE(review): this view shows both a recursive version and an
   iterative version (bounded to 1000 steps) merged by elision --
   presumably one replaced the other; verify against the full source. */
2478 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2480 if (!frag_arr[pos]) frag_arr[pos] = val;
2481 if (frag_arr[current_ir_graph->n_loc - 1]) {
2482 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2483 assert(arr != frag_arr && "Endless recursion detected");
2484 set_frag_value(arr, pos, val);
2489 for (i = 0; i < 1000; ++i) {
2490 if (!frag_arr[pos]) {
2491 frag_arr[pos] = val;
2493 if (frag_arr[current_ir_graph->n_loc - 1]) {
2494 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2500 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but looks the value up in the frag array of
   the fragile control-flow op `cfOp` first, so the precise exception
   state after cfOp is used instead of the block's end state. */
2505 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2509 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2511 frag_arr = get_frag_arr(cfOp);
2512 res = frag_arr[pos];
2514 if (block->attr.block.graph_arr[pos]) {
2515 /* There was a set_value after the cfOp and no get_value before that
2516 set_value. We must build a Phi node now. */
2517 if (block->attr.block.matured) {
2518 int ins = get_irn_arity(block);
2520 NEW_ARR_A (ir_node *, nin, ins);
2521 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, queued on the
   block's link list for mature_immBlock. */
2523 res = new_rd_Phi0 (current_ir_graph, block, mode);
2524 res->attr.phi0_pos = pos;
2525 res->link = block->link;
2529 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2530 but this should be better: (remove comment if this works) */
2531 /* It's a Phi, we can write this into all graph_arrs with NULL */
2532 set_frag_value(block->attr.block.graph_arr, pos, res);
2534 res = get_r_value_internal(block, pos, mode);
2535 set_frag_value(block->attr.block.graph_arr, pos, res);
2543 computes the predecessors for the real phi node, and then
2544 allocates and returns this node. The routine called to allocate the
2545 node might optimize it away and return a real value.
2546 This function must be called with an in-array of proper size. **/
/* General (slow) variant of phi_merge: handles Bad predecessors,
   precise exception context, and the Phi0-placeholder protocol. */
2548 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2550 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2553 /* If this block has no value at pos create a Phi0 and remember it
2554 in graph_arr to break recursions.
2555 Else we may not set graph_arr as there a later value is remembered. */
2557 if (!block->attr.block.graph_arr[pos]) {
2558 if (block == get_irg_start_block(current_ir_graph)) {
2559 /* Collapsing to Bad tarvals is no good idea.
2560 So we call a user-supplied routine here that deals with this case as
2561 appropriate for the given language. Sadly the only help we can give
2562 here is the position.
2564 Even if all variables are defined before use, it can happen that
2565 we get to the start block, if a Cond has been replaced by a tuple
2566 (bad, jmp). In this case we call the function needlessly, eventually
2567 generating a non-existent error.
2568 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2571 if (default_initialize_local_variable) {
2572 ir_node *rem = get_cur_block();
2574 set_cur_block(block);
2575 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* No language callback installed: fall back to a Bad constant. */
2579 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2580 /* We don't need to care about exception ops in the start block.
2581 There are none by definition. */
2582 return block->attr.block.graph_arr[pos];
/* Not the start block: insert a Phi0 placeholder to break recursion. */
2584 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2585 block->attr.block.graph_arr[pos] = phi0;
2586 #if PRECISE_EXC_CONTEXT
2587 if (get_opt_precise_exc_context()) {
2588 /* Set graph_arr for fragile ops. Also here we should break recursion.
2589 We could choose a cyclic path through a cfop. But the recursion would
2590 break at some point. */
2591 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2597 /* This loop goes to all predecessor blocks of the block the Phi node
2598 is in and there finds the operands of the Phi node by calling
2599 get_r_value_internal. */
2600 for (i = 1; i <= ins; ++i) {
2601 prevCfOp = skip_Proj(block->in[i]);
2603 if (is_Bad(prevCfOp)) {
2604 /* In case a Cond has been optimized we would get right to the start block
2605 with an invalid definition. */
2606 nin[i-1] = new_Bad();
2609 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2611 if (!is_Bad(prevBlock)) {
2612 #if PRECISE_EXC_CONTEXT
2613 if (get_opt_precise_exc_context() &&
2614 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2615 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2616 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2619 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2621 nin[i-1] = new_Bad();
2625 /* We want to pass the Phi0 node to the constructor: this finds additional
2626 optimization possibilities.
2627 The Phi0 node either is allocated in this function, or it comes from
2628 a former call to get_r_value_internal. In this case we may not yet
2629 exchange phi0, as this is done in mature_immBlock. */
2631 phi0_all = block->attr.block.graph_arr[pos];
2632 if (!((get_irn_op(phi0_all) == op_Phi) &&
2633 (get_irn_arity(phi0_all) == 0) &&
2634 (get_nodes_block(phi0_all) == block)))
2640 /* After collecting all predecessors into the array nin a new Phi node
2641 with these predecessors is created. This constructor contains an
2642 optimization: If all predecessors of the Phi node are identical it
2643 returns the only operand instead of a new Phi node. */
2644 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2646 /* In case we allocated a Phi0 node at the beginning of this procedure,
2647 we need to exchange this Phi0 with the real Phi. */
2649 exchange(phi0, res);
2650 block->attr.block.graph_arr[pos] = res;
2651 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2652 only an optimization. */
2658 /* This function returns the last definition of a variable. In case
2659 this variable was last defined in a previous block, Phi nodes are
2660 inserted. If the part of the firm graph containing the definition
2661 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): callers (see get_d_value / get_store below) increment the
   graph's visited counter before calling in; the per-block visited flag is
   what distinguishes case 4 (second visit) from a first visit. */
2663 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2666 /* There are 4 cases to treat.
2668 1. The block is not mature and we visit it the first time. We can not
2669 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2670 predecessors is returned. This node is added to the linked list (field
2671 "link") of the containing block to be completed when this block is
2672 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2675 2. The value is already known in this block, graph_arr[pos] is set and we
2676 visit the block the first time. We can return the value without
2677 creating any new nodes.
2679 3. The block is mature and we visit it the first time. A Phi node needs
2680 to be created (phi_merge). If the Phi is not needed, as all it's
2681 operands are the same value reaching the block through different
2682 paths, it's optimized away and the value itself is returned.
2684 4. The block is mature, and we visit it the second time. Now two
2685 subcases are possible:
2686 * The value was computed completely the last time we were here. This
2687 is the case if there is no loop. We can return the proper value.
2688 * The recursion that visited this node and set the flag did not
2689 return yet. We are computing a value in a loop and need to
2690 break the recursion. This case only happens if we visited
2691 the same block with phi_merge before, which inserted a Phi0.
2692 So we return the Phi0.
2695 /* case 4 -- already visited. */
2696 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2697 /* As phi_merge allocates a Phi0 this value is always defined. Here
2698 is the critical difference of the two algorithms. */
2699 assert(block->attr.block.graph_arr[pos]);
2700 return block->attr.block.graph_arr[pos];
2703 /* visited the first time */
2704 set_irn_visited(block, get_irg_visited(current_ir_graph));
2706 /* Get the local valid value */
2707 res = block->attr.block.graph_arr[pos];
2709 /* case 2 -- If the value is actually computed, return it. */
2710 if (res) { return res; };
2712 if (block->attr.block.matured) { /* case 3 */
2714 /* The Phi has the same amount of ins as the corresponding block. */
2715 int ins = get_irn_arity(block);
2717 NEW_ARR_A (ir_node *, nin, ins);
2719 /* Phi merge collects the predecessors and then creates a node. */
2720 res = phi_merge (block, pos, mode, nin, ins);
2722 } else { /* case 1 */
2723 /* The block is not mature, we don't know how many in's are needed. A Phi
2724 with zero predecessors is created. Such a Phi node is called Phi0
2725 node. The Phi0 is then added to the list of Phi0 nodes in this block
2726 to be matured by mature_immBlock later.
2727 The Phi0 has to remember the pos of it's internal value. If the real
2728 Phi is computed, pos is used to update the array with the local
2730 res = new_rd_Phi0 (current_ir_graph, block, mode);
2731 res->attr.phi0_pos = pos;
2732 res->link = block->link;
2736 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the elided lines near here presumably consult
   default_initialize_local_variable (set via init_cons) before falling back
   to a zero constant -- confirm against the full file. */
2739 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2740 assert (mode->code >= irm_F && mode->code <= irm_P);
2741 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2742 get_mode_null(mode));
2745 /* The local valid value is available now. */
2746 block->attr.block.graph_arr[pos] = res;
2751 #endif /* USE_FAST_PHI_CONSTRUCTION */
2753 /* ************************************************************************** */
2756 * Finalize a Block node, when all control flows are known.
2757 * Acceptable parameters are only Block nodes.
/* Once matured, no further predecessors may be added (add_immBlock_pred
   asserts on matured blocks).  Maturing completes every Phi0 that was queued
   on the block's "link" list by get_r_value_internal. */
2760 mature_immBlock (ir_node *block)
2766 assert (get_irn_opcode(block) == iro_Block);
2767 /* @@@ should be commented in
2768 assert (!get_Block_matured(block) && "Block already matured"); */
2770 if (!get_Block_matured(block)) {
/* NOTE(review): ins is ARR_LEN(block->in)-1 -- the first slot of the
   dynamic in-array is apparently reserved; confirm against the ir_node
   layout in the full file. */
2771 ins = ARR_LEN (block->in)-1;
2772 /* Fix block parameters */
2773 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2775 /* An array for building the Phi nodes. */
2776 NEW_ARR_A (ir_node *, nin, ins);
2778 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each queued Phi0 is replaced (exchange) by the real Phi that phi_merge
   builds now that all predecessors are known.  The visited counter is
   bumped per Phi0 so phi_merge's traversal state is fresh each time. */
2780 for (n = block->link; n; n=next) {
2781 inc_irg_visited(current_ir_graph);
2783 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2786 block->attr.block.matured = 1;
2788 /* Now, as the block is a finished firm node, we can optimize it.
2789 Since other nodes have been allocated since the block was created
2790 we can not free the node on the obstack. Therefore we have to call
2792 Unfortunately the optimization does not change a lot, as all allocated
2793 nodes refer to the unoptimized node.
2794 We can call _2, as global cse has no effect on blocks. */
2795 block = optimize_in_place_2(block);
2796 IRN_VRFY_IRG(block, current_ir_graph);
/* --- Debug-info constructors (new_d_*) -----------------------------------
   Each wrapper delegates to the matching new_bd_* constructor, placing the
   node in the graph's current block.  Constants (new_d_Const*) go into the
   start block instead.  Some call continuation lines are elided in this
   excerpt. */
2801 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2803 return new_bd_Phi(db, current_ir_graph->current_block,
2808 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2810 return new_bd_Const(db, current_ir_graph->start_block,
2815 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2817 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2821 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, type *tp)
2823 return new_bd_Const_type(db, current_ir_graph->start_block,
2829 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2831 return new_bd_Id(db, current_ir_graph->current_block,
2836 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2838 return new_bd_Proj(db, current_ir_graph->current_block,
/* Marks the Cond as "fragmentary" and records max_proj as its default
   projection, then builds the mode_X Proj for that default case. */
2843 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2846 assert(arg->op == op_Cond);
2847 arg->attr.c.kind = fragmentary;
2848 arg->attr.c.default_proj = max_proj;
2849 res = new_Proj (arg, mode_X, max_proj);
2854 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2856 return new_bd_Conv(db, current_ir_graph->current_block,
2861 new_d_Cast (dbg_info *db, ir_node *op, type *to_tp)
2863 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2867 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2869 return new_bd_Tuple(db, current_ir_graph->current_block,
2874 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2876 return new_bd_Add(db, current_ir_graph->current_block,
2881 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2883 return new_bd_Sub(db, current_ir_graph->current_block,
2889 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2891 return new_bd_Minus(db, current_ir_graph->current_block,
2896 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2898 return new_bd_Mul(db, current_ir_graph->current_block,
2903 * allocate the frag array
2905 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2906 if (get_opt_precise_exc_context()) {
2907 if ((current_ir_graph->phase_state == phase_building) &&
2908 (get_irn_op(res) == op) && /* Could be optimized away. */
2909 !*frag_store) /* Could be a cse where the arr is already set. */ {
2910 *frag_store = new_frag_arr(res);
/* --- Arithmetic / control-flow new_d_* constructors ----------------------
   The division-family ops (Quot, DivMod, Div, Mod) are fragile: they are
   pinned and, under PRECISE_EXC_CONTEXT, get a fragment array via
   allocate_frag_arr so precise exception state can be tracked. */
2917 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2920 res = new_bd_Quot (db, current_ir_graph->current_block,
2922 res->attr.except.pin_state = op_pin_state_pinned;
2923 #if PRECISE_EXC_CONTEXT
2924 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2931 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2934 res = new_bd_DivMod (db, current_ir_graph->current_block,
2936 res->attr.except.pin_state = op_pin_state_pinned;
2937 #if PRECISE_EXC_CONTEXT
2938 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2945 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2948 res = new_bd_Div (db, current_ir_graph->current_block,
2950 res->attr.except.pin_state = op_pin_state_pinned;
2951 #if PRECISE_EXC_CONTEXT
2952 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2959 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2962 res = new_bd_Mod (db, current_ir_graph->current_block,
2964 res->attr.except.pin_state = op_pin_state_pinned;
2965 #if PRECISE_EXC_CONTEXT
2966 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* The remaining wrappers below delegate directly to new_bd_* in the
   current block; continuation lines are elided in this excerpt. */
2973 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2975 return new_bd_And (db, current_ir_graph->current_block,
2980 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2982 return new_bd_Or (db, current_ir_graph->current_block,
2987 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2989 return new_bd_Eor (db, current_ir_graph->current_block,
2994 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
2996 return new_bd_Not (db, current_ir_graph->current_block,
3001 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3003 return new_bd_Shl (db, current_ir_graph->current_block,
3008 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3010 return new_bd_Shr (db, current_ir_graph->current_block,
3015 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3017 return new_bd_Shrs (db, current_ir_graph->current_block,
3022 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3024 return new_bd_Rot (db, current_ir_graph->current_block,
3029 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3031 return new_bd_Abs (db, current_ir_graph->current_block,
3036 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3038 return new_bd_Cmp (db, current_ir_graph->current_block,
3043 new_d_Jmp (dbg_info *db)
3045 return new_bd_Jmp (db, current_ir_graph->current_block);
3049 new_d_IJmp (dbg_info *db, ir_node *tgt)
3051 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3055 new_d_Cond (dbg_info *db, ir_node *c)
3057 return new_bd_Cond (db, current_ir_graph->current_block, c);
/* --- Memory / call / interprocedural new_d_* constructors ----------------
   Call, Load, Store and Alloc are fragile ops: under PRECISE_EXC_CONTEXT
   they get a fragment array (allocate_frag_arr) for precise exception
   tracking.  Everything else delegates straight to new_bd_*; SymConst
   variants go into the start block, the rest into the current block. */
3061 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3065 res = new_bd_Call (db, current_ir_graph->current_block,
3066 store, callee, arity, in, tp);
3067 #if PRECISE_EXC_CONTEXT
3068 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3075 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3077 return new_bd_Return (db, current_ir_graph->current_block,
3082 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3084 return new_bd_Raise (db, current_ir_graph->current_block,
3089 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3092 res = new_bd_Load (db, current_ir_graph->current_block,
3094 #if PRECISE_EXC_CONTEXT
3095 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3102 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3105 res = new_bd_Store (db, current_ir_graph->current_block,
3107 #if PRECISE_EXC_CONTEXT
3108 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3115 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, type *alloc_type,
3119 res = new_bd_Alloc (db, current_ir_graph->current_block,
3120 store, size, alloc_type, where);
3121 #if PRECISE_EXC_CONTEXT
3122 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3129 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3130 ir_node *size, type *free_type, where_alloc where)
3132 return new_bd_Free (db, current_ir_graph->current_block,
3133 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (plain member selection). */
3137 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3138 /* GL: objptr was called frame before. Frame was a bad choice for the name
3139 as the operand could as well be a pointer to a dynamic object. */
3141 return new_bd_Sel (db, current_ir_graph->current_block,
3142 store, objptr, 0, NULL, ent);
3146 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3148 return new_bd_Sel (db, current_ir_graph->current_block,
3149 store, objptr, n_index, index, sel);
3153 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
3155 return (new_bd_InstOf (db, current_ir_graph->current_block,
3156 store, objptr, ent));
3160 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, type *tp)
3162 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3167 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3169 return new_bd_SymConst (db, current_ir_graph->start_block,
3174 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3176 return new_bd_Sync (db, current_ir_graph->current_block,
/* body of new_d_Bad (signature elided in this excerpt) */
3183 return _new_d_Bad();
3187 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3189 return new_bd_Confirm (db, current_ir_graph->current_block,
3194 new_d_Unknown (ir_mode *m)
3196 return new_bd_Unknown(m);
3200 new_d_CallBegin (dbg_info *db, ir_node *call)
3203 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3208 new_d_EndReg (dbg_info *db)
3211 res = new_bd_EndReg(db, current_ir_graph->current_block);
3216 new_d_EndExcept (dbg_info *db)
3219 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3224 new_d_Break (dbg_info *db)
3226 return new_bd_Break (db, current_ir_graph->current_block);
3230 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3232 return new_bd_Filter (db, current_ir_graph->current_block,
/* body of new_d_NoMem (signature elided in this excerpt) */
3239 return _new_d_NoMem();
3243 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3244 ir_node *ir_true, ir_mode *mode) {
3245 return new_bd_Mux (db, current_ir_graph->current_block,
3246 sel, ir_false, ir_true, mode);
3249 /* ********************************************************************* */
3250 /* Comfortable interface with automatic Phi node construction. */
3251 /* (Uses also constructors of the new_d_* interface, except new_Block.) */
3252 /* ********************************************************************* */
3254 /* Block construction */
3255 /* immature Block without predecessors */
/* Create an immature Block (no predecessors yet) and make it the current
   block.  Predecessors are added later with add_immBlock_pred; the block is
   finalized with mature_immBlock.  Only valid while the graph is being
   built (phase_building). */
3256 ir_node *new_d_immBlock (dbg_info *db) {
3259 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3260 /* creates a new dynamic in-array as length of in is -1 */
3261 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3262 current_ir_graph->current_block = res;
3263 res->attr.block.matured = 0;
3264 res->attr.block.dead = 0;
3265 /* res->attr.block.exc = exc_normal; */
3266 /* res->attr.block.handler_entry = 0; */
3267 res->attr.block.irg = current_ir_graph;
3268 res->attr.block.backedge = NULL;
3269 res->attr.block.in_cg = NULL;
3270 res->attr.block.cg_backedge = NULL;
3271 set_Block_block_visited(res, 0);
3273 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds the current SSA value of each local (n_loc slots);
   slot 0 is used for the store (see get_store/set_store below). */
3274 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3275 current_ir_graph->n_loc);
3276 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3278 /* Immature block may not be optimized! */
3279 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper: immature block without debug info. */
3285 new_immBlock (void) {
3286 return new_d_immBlock(NULL);
3289 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new predecessor of an (immature) block.  Adding to a
   matured block is a hard error. */
3291 add_immBlock_pred (ir_node *block, ir_node *jmp)
3293 if (block->attr.block.matured) {
3294 assert(0 && "Error: Block already matured!\n");
3297 assert(jmp != NULL);
3298 ARR_APP1(ir_node *, block->in, jmp);
3302 /* changing the current block */
/* Makes target the block into which subsequently constructed nodes go. */
3304 set_cur_block (ir_node *target) {
3305 current_ir_graph->current_block = target;
3308 /* ************************ */
3309 /* parameter administration */
3311 /* get a value from the parameter array from the current block by its index */
/* Note the pos+1 shift: slot 0 of graph_arr is reserved for the store, so
   user variable pos maps to array index pos+1.  The visited counter is
   bumped before get_r_value_internal so its traversal starts fresh. */
3313 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3315 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3316 inc_irg_visited(current_ir_graph);
3318 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3320 /* get a value from the parameter array from the current block by its index */
3322 get_value (int pos, ir_mode *mode)
3324 return get_d_value(NULL, pos, mode);
3327 /* set a value at position pos in the parameter array from the current block */
3329 set_value (int pos, ir_node *value)
3331 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3332 assert(pos+1 < current_ir_graph->n_loc);
3333 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3336 /* get the current store */
/* The store lives in reserved slot 0 of graph_arr, with mode_M. */
3340 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3341 /* GL: one could call get_value instead */
3342 inc_irg_visited(current_ir_graph);
3343 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3346 /* set the current store */
3348 set_store (ir_node *store)
3350 /* GL: one could call set_value instead */
3351 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3352 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep ka alive past optimization by attaching it to the End node. */
3356 keep_alive (ir_node *ka) {
3357 add_End_keepalive(current_ir_graph->end, ka);
3360 /* --- Useful access routines --- */
3361 /* Returns the current block of the current graph. To set the current
3362 block use set_cur_block. */
3363 ir_node *get_cur_block(void) {
3364 return get_irg_current_block(current_ir_graph);
3367 /* Returns the frame type of the current graph */
3368 type *get_cur_frame_type(void) {
3369 return get_irg_frame_type(current_ir_graph);
3373 /* ********************************************************************* */
3376 /* call once for each run of the library */
/* Registers the frontend callback used to initialize a local variable that
   is read before it was ever written (see get_r_value_internal). */
3378 init_cons(uninitialized_local_variable_func_t *func)
3380 default_initialize_local_variable = func;
3383 /* call for each graph */
/* Marks one graph as finished: construction interface may no longer be
   used on it. */
3385 irg_finalize_cons (ir_graph *irg) {
3386 irg->phase_state = phase_high;
3390 irp_finalize_cons (void) {
3391 int i, n_irgs = get_irp_n_irgs();
3392 for (i = 0; i < n_irgs; i++) {
3393 irg_finalize_cons(get_irp_irg(i));
3395 irp->phase_state = phase_high;\
/* --- Comfortable interface, part 1 ---------------------------------------
   Convenience wrappers without debug info: every new_X here simply calls
   new_d_X with a NULL dbg_info.  Closing braces of most wrappers are
   elided in this excerpt. */
3401 ir_node *new_Block(int arity, ir_node **in) {
3402 return new_d_Block(NULL, arity, in);
3404 ir_node *new_Start (void) {
3405 return new_d_Start(NULL);
3407 ir_node *new_End (void) {
3408 return new_d_End(NULL);
3410 ir_node *new_Jmp (void) {
3411 return new_d_Jmp(NULL);
3413 ir_node *new_IJmp (ir_node *tgt) {
3414 return new_d_IJmp(NULL, tgt);
3416 ir_node *new_Cond (ir_node *c) {
3417 return new_d_Cond(NULL, c);
3419 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3420 return new_d_Return(NULL, store, arity, in);
3422 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3423 return new_d_Raise(NULL, store, obj);
3425 ir_node *new_Const (ir_mode *mode, tarval *con) {
3426 return new_d_Const(NULL, mode, con);
3429 ir_node *new_Const_long(ir_mode *mode, long value)
3431 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
3434 ir_node *new_Const_type(tarval *con, type *tp) {
3435 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3438 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3439 return new_d_SymConst(NULL, value, kind);
3441 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3442 return new_d_simpleSel(NULL, store, objptr, ent);
3444 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3446 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3448 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
3449 return new_d_InstOf (NULL, store, objptr, ent);
3451 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3453 return new_d_Call(NULL, store, callee, arity, in, tp);
3455 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3456 return new_d_Add(NULL, op1, op2, mode);
3458 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3459 return new_d_Sub(NULL, op1, op2, mode);
3461 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3462 return new_d_Minus(NULL, op, mode);
3464 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3465 return new_d_Mul(NULL, op1, op2, mode);
3467 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3468 return new_d_Quot(NULL, memop, op1, op2);
3470 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3471 return new_d_DivMod(NULL, memop, op1, op2);
3473 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3474 return new_d_Div(NULL, memop, op1, op2);
3476 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3477 return new_d_Mod(NULL, memop, op1, op2);
3479 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3480 return new_d_Abs(NULL, op, mode);
3482 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3483 return new_d_And(NULL, op1, op2, mode);
3485 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3486 return new_d_Or(NULL, op1, op2, mode);
3488 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3489 return new_d_Eor(NULL, op1, op2, mode);
3491 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3492 return new_d_Not(NULL, op, mode);
/* --- Comfortable interface, part 2 ---------------------------------------
   Continuation of the NULL-dbg_info convenience wrappers. */
3494 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3495 return new_d_Shl(NULL, op, k, mode);
3497 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3498 return new_d_Shr(NULL, op, k, mode);
3500 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3501 return new_d_Shrs(NULL, op, k, mode);
3503 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3504 return new_d_Rot(NULL, op, k, mode);
3506 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3507 return new_d_Cmp(NULL, op1, op2);
3509 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3510 return new_d_Conv(NULL, op, mode);
3512 ir_node *new_Cast (ir_node *op, type *to_tp) {
3513 return new_d_Cast(NULL, op, to_tp);
3515 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3516 return new_d_Phi(NULL, arity, in, mode);
3518 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3519 return new_d_Load(NULL, store, addr, mode);
3521 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3522 return new_d_Store(NULL, store, addr, val);
3524 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
3525 where_alloc where) {
3526 return new_d_Alloc(NULL, store, size, alloc_type, where);
3528 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3529 type *free_type, where_alloc where) {
3530 return new_d_Free(NULL, store, ptr, size, free_type, where);
3532 ir_node *new_Sync (int arity, ir_node **in) {
3533 return new_d_Sync(NULL, arity, in);
3535 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3536 return new_d_Proj(NULL, arg, mode, proj);
3538 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3539 return new_d_defaultProj(NULL, arg, max_proj);
3541 ir_node *new_Tuple (int arity, ir_node **in) {
3542 return new_d_Tuple(NULL, arity, in);
3544 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3545 return new_d_Id(NULL, val, mode);
/* body of new_Bad is elided in this excerpt */
3547 ir_node *new_Bad (void) {
3550 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3551 return new_d_Confirm (NULL, val, bound, cmp);
3553 ir_node *new_Unknown(ir_mode *m) {
3554 return new_d_Unknown(m);
3556 ir_node *new_CallBegin (ir_node *callee) {
3557 return new_d_CallBegin(NULL, callee);
3559 ir_node *new_EndReg (void) {
3560 return new_d_EndReg(NULL);
3562 ir_node *new_EndExcept (void) {
3563 return new_d_EndExcept(NULL);
3565 ir_node *new_Break (void) {
3566 return new_d_Break(NULL);
3568 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3569 return new_d_Filter(NULL, arg, mode, proj);
3571 ir_node *new_NoMem (void) {
3572 return new_d_NoMem();
3574 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3575 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);