3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
41 #if USE_EXPLICIT_PHI_IN_STACK
42 /* A stack needed for the automatic Phi node construction in constructor
43 Phi_in. Redefinition in irgraph.c!! */
48 typedef struct Phi_in_stack Phi_in_stack;
51 /* when we need verifying */
/* IRN_VRFY_IRG(res, irg): expands to nothing when verification is disabled,
   otherwise forwards to irn_vrfy_irg().  The #ifdef/#else lines selecting
   between the two definitions are on elided source lines. */
53 # define IRN_VRFY_IRG(res, irg)
55 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
59 * language dependant initialization variable
/* Front-end supplied callback used to initialize otherwise-uninitialized
   local variables during SSA construction; NULL until a front end sets it. */
61 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
63 /* -------------------------------------------- */
64 /* privat interfaces, for professional use only */
65 /* -------------------------------------------- */
67 /* Constructs a Block with a fixed number of predecessors.
68 Does not set current_block. Can not be used with automatic
69 Phi node construction. */
/* NOTE(review): this listing is elided -- the return type, braces, the
   declaration of `res` and the return statement are on source lines not
   shown here. */
71 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block node belongs to no block itself: block argument is NULL. */
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are supplied up front, so mark the block mature now. */
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call graph) predecessor info is filled in later, if ever. */
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
87 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node: no predecessors, result mode mode_T.
   (Elided listing: declarations/return/braces on hidden lines.) */
92 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
96 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
97 /* res->attr.start.irg = irg; */
99 IRN_VRFY_IRG(res, irg);
/* Constructs an End node.  Arity -1: predecessors (e.g. keep-alive edges,
   see add_End_keepalive use in new_rd_Phi below) are added dynamically. */
104 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
108 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
110 IRN_VRFY_IRG(res, irg);
114 /* Creates a Phi node with all predecessors. Calling this constructor
115 is only allowed if the corresponding block is mature. */
117 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
121 bool has_unknown = false;
123 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's arity. */
125 if ( get_Block_matured(block) )
126 assert( get_irn_arity(block) == arity );
128 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
130 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown nodes; the loop body setting has_unknown is on
   elided lines. */
132 for (i = arity-1; i >= 0; i--)
133 if (get_irn_op(in[i]) == op_Unknown) {
/* Phis with Unknown operands must not be optimized away prematurely. */
138 if (!has_unknown) res = optimize_node (res);
139 IRN_VRFY_IRG(res, irg);
141 /* Memory Phis in endless loops must be kept alive.
142 As we can't distinguish these easily we keep all of them alive. */
143 if ((res->op == op_Phi) && (mode == mode_M))
144 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval `con` with explicit type `tp`.
   NOTE(review): the `block` parameter is ignored -- the node is always
   placed in the graph's start block (see line 153). */
149 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
153 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
154 res->attr.con.tv = con;
155 set_Const_type(res, tp); /* Call method because of complex assertion. */
156 res = optimize_node (res);
157 assert(get_Const_type(res) == tp);
158 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const with the unknown type. */
164 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
166 type *tp = unknown_type;
167 /* removing this somehow causes errors in jack. */
168 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node: a transparent copy of `val` in mode `mode`. */
172 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
176 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
177 res = optimize_node(res);
178 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting result `proj` of tuple node `arg`.
   (The long `proj` parameter is on an elided continuation line.) */
183 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
188 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
189 res->attr.proj = proj;
192 assert(get_Proj_pred(res));
193 assert(get_nodes_block(get_Proj_pred(res)));
195 res = optimize_node(res);
197 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond: marks the Cond as fragmentary,
   records max_proj as its default projection, then builds an X-mode Proj. */
203 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
207 assert(arg->op == op_Cond);
208 arg->attr.c.kind = fragmentary;
209 arg->attr.c.default_proj = max_proj;
210 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting `op` to mode `mode`. */
215 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
219 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
220 res = optimize_node(res);
221 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast node: retags `op` with type `to_tp`; the mode is kept
   (get_irn_mode(op)), only atomic target types are allowed. */
226 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
230 assert(is_atomic_type(to_tp));
232 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
233 res->attr.cast.totype = to_tp;
234 res = optimize_node(res);
235 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple node grouping `arity` values (mode_T). */
240 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
244 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
245 res = optimize_node (res);
246 IRN_VRFY_IRG(res, irg);
/* -------------------------------------------------------------------------
   Arithmetic, logic, shift and compare constructors.  All follow the same
   pattern: fill a local `in[]` array (those assignment lines are elided in
   this listing, as are declarations, returns and braces), build the node,
   run the local optimizer, verify, return.
   The dividing operations (Quot/DivMod/Div/Mod) take a memory operand and
   produce mode_T tuples (in = {memop, op1, op2}).
   ------------------------------------------------------------------------- */
251 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
252 ir_node *op1, ir_node *op2, ir_mode *mode)
259 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
260 res = optimize_node(res);
261 IRN_VRFY_IRG(res, irg);
266 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
267 ir_node *op1, ir_node *op2, ir_mode *mode)
274 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
275 res = optimize_node (res);
276 IRN_VRFY_IRG(res, irg);
281 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
282 ir_node *op, ir_mode *mode)
286 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
287 res = optimize_node(res);
288 IRN_VRFY_IRG(res, irg);
293 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
294 ir_node *op1, ir_node *op2, ir_mode *mode)
301 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
308 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
309 ir_node *memop, ir_node *op1, ir_node *op2)
317 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
318 res = optimize_node(res);
319 IRN_VRFY_IRG(res, irg);
324 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
325 ir_node *memop, ir_node *op1, ir_node *op2)
333 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
334 res = optimize_node(res);
335 IRN_VRFY_IRG(res, irg);
340 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
341 ir_node *memop, ir_node *op1, ir_node *op2)
349 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
350 res = optimize_node(res);
351 IRN_VRFY_IRG(res, irg);
356 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
357 ir_node *memop, ir_node *op1, ir_node *op2)
365 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
366 res = optimize_node(res);
367 IRN_VRFY_IRG(res, irg);
372 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
373 ir_node *op1, ir_node *op2, ir_mode *mode)
380 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
381 res = optimize_node(res);
382 IRN_VRFY_IRG(res, irg);
387 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
388 ir_node *op1, ir_node *op2, ir_mode *mode)
395 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
396 res = optimize_node(res);
397 IRN_VRFY_IRG(res, irg);
402 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
403 ir_node *op1, ir_node *op2, ir_mode *mode)
410 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
411 res = optimize_node (res);
412 IRN_VRFY_IRG(res, irg);
417 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_mode *mode)
422 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
423 res = optimize_node(res);
424 IRN_VRFY_IRG(res, irg);
/* Shift/rotate constructors: in = {op, k} where k is the shift amount. */
429 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
430 ir_node *op, ir_node *k, ir_mode *mode)
437 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
438 res = optimize_node(res);
439 IRN_VRFY_IRG(res, irg);
444 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
445 ir_node *op, ir_node *k, ir_mode *mode)
452 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
453 res = optimize_node(res);
454 IRN_VRFY_IRG(res, irg);
459 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
460 ir_node *op, ir_node *k, ir_mode *mode)
467 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
468 res = optimize_node(res);
469 IRN_VRFY_IRG(res, irg);
474 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
475 ir_node *op, ir_node *k, ir_mode *mode)
482 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
483 res = optimize_node(res);
484 IRN_VRFY_IRG(res, irg);
489 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *op, ir_mode *mode)
494 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
495 res = optimize_node (res);
496 IRN_VRFY_IRG(res, irg);
/* Cmp yields a mode_T tuple; individual relations are selected via Proj. */
501 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
502 ir_node *op1, ir_node *op2)
509 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
510 res = optimize_node(res);
511 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X, no predecessors). */
516 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
520 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
521 res = optimize_node (res);
522 IRN_VRFY_IRG (res, irg);
/* Constructs a Cond on selector `c`; starts as a dense jump table with
   default projection 0 (new_rd_defaultProj may later mark it fragmentary). */
527 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
531 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
532 res->attr.c.kind = dense;
533 res->attr.c.default_proj = 0;
534 res = optimize_node (res);
535 IRN_VRFY_IRG(res, irg);
/* Constructs a Call: r_in = {store, callee, arguments...}.  The r_arity
   computation and the stores into r_in[0]/r_in[1] are on elided lines;
   the arguments are copied in starting at r_in[2]. */
540 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
541 ir_node *callee, int arity, ir_node **in, type *tp)
548 NEW_ARR_A(ir_node *, r_in, r_arity);
551 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
553 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
555 assert((get_unknown_type() == tp) || is_method_type(tp));
556 set_Call_type(res, tp);
557 res->attr.call.exc.pin_state = op_pin_state_pinned;
558 res->attr.call.callee_arr = NULL;
559 res = optimize_node(res);
560 IRN_VRFY_IRG(res, irg);
/* Constructs a Return: r_in = {store, results...}; results copied to
   r_in[1] onward (r_arity computation and r_in[0] store are elided). */
565 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
566 ir_node *store, int arity, ir_node **in)
573 NEW_ARR_A (ir_node *, r_in, r_arity);
575 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
576 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
577 res = optimize_node(res);
578 IRN_VRFY_IRG(res, irg);
/* Constructs a Raise: in = {store, obj} (the in[] fills are elided). */
583 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
590 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
591 res = optimize_node(res);
592 IRN_VRFY_IRG(res, irg);
/* Constructs a Load from `adr` with result mode `mode`; in = {store, adr}.
   Defaults: pinned, non-volatile. */
597 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
598 ir_node *store, ir_node *adr, ir_mode *mode)
605 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
606 res->attr.load.exc.pin_state = op_pin_state_pinned;
607 res->attr.load.load_mode = mode;
608 res->attr.load.volatility = volatility_non_volatile;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
/* Constructs a Store of `val` to `adr`; in = {store, adr, val}.
   Defaults: pinned, non-volatile. */
615 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
616 ir_node *store, ir_node *adr, ir_node *val)
624 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
625 res->attr.store.exc.pin_state = op_pin_state_pinned;
626 res->attr.store.volatility = volatility_non_volatile;
627 res = optimize_node(res);
628 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of `size` bytes of `alloc_type` on stack or heap
   (`where`); in = {store, size} (fills elided).  Pinned by default. */
633 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
634 ir_node *size, type *alloc_type, where_alloc where)
641 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
642 res->attr.a.exc.pin_state = op_pin_state_pinned;
643 res->attr.a.where = where;
644 res->attr.a.type = alloc_type;
645 res = optimize_node(res);
646 IRN_VRFY_IRG(res, irg);
/* Constructs a Free of `ptr` (size `size`, type `free_type`);
   in = {store, ptr, size} (fills elided). */
651 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
652 ir_node *ptr, ir_node *size, type *free_type)
660 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
661 res->attr.f = free_type;
662 res = optimize_node(res);
663 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel: address of entity `ent` relative to `objptr` with
   `arity` index expressions; r_in = {store, objptr, indices...}. */
668 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
669 int arity, ir_node **in, entity *ent)
675 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
678 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
681 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
682 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
683 res->attr.s.ent = ent;
684 res = optimize_node(res);
685 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (runtime type test of `objptr` against `ent`).
   NOTE(review): the opcode passed is op_Sel, not a dedicated op_InstOf --
   looks intentional (io attributes are set) but worth confirming; the node
   is deliberately NOT optimized (line 705). */
690 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
691 ir_node *objptr, type *ent)
698 NEW_ARR_A(ir_node *, r_in, r_arity);
702 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
703 res->attr.io.ent = ent;
705 /* res = optimize(res); */
706 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst of kind `symkind` with explicit type `tp`.
   Address kinds get a pointer mode; the mode selection for other kinds
   is on elided lines. */
711 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
712 symconst_kind symkind, type *tp) {
716 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
721 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
723 res->attr.i.num = symkind;
724 res->attr.i.sym = value;
727 res = optimize_node(res);
728 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: SymConst with the unknown type. */
733 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
734 symconst_kind symkind)
736 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
/* The four typed SymConst shorthands below all place the node in the
   start block.  The (type *) casts initialize the first union member. */
740 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
741 symconst_symbol sym = {(type *)symbol};
742 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
745 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
746 symconst_symbol sym = {(type *)symbol};
747 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
750 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
751 symconst_symbol sym = {symbol};
752 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
755 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
756 symconst_symbol sym = {symbol};
757 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Constructs a Sync merging `arity` memory values into one (mode_M). */
761 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
765 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
766 res = optimize_node(res);
767 IRN_VRFY_IRG(res, irg);
/* Returns the graph's Bad node (body on elided lines). */
772 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts that `val` relates to `bound` by `cmp`
   on this path; in = {val, bound} (fills elided).  Mode follows `val`. */
778 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
780 ir_node *in[2], *res;
784 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
785 res->attr.confirm_cmp = cmp;
786 res = optimize_node (res);
787 IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown of mode `m`; always lives in the start block and
   is neither optimized nor verified here. */
792 new_rd_Unknown (ir_graph *irg, ir_mode *m)
794 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin (interprocedural view) referencing `call`;
   its single operand is the call's pointer. */
798 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
803 in[0] = get_Call_ptr(call);
804 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
805 /* res->attr.callbegin.irg = irg; */
806 res->attr.callbegin.call = call;
807 res = optimize_node(res);
808 IRN_VRFY_IRG(res, irg);
/* Constructs an EndReg (interprocedural regular end); arity -1: dynamic
   predecessors, like End. */
813 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
817 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
819 IRN_VRFY_IRG(res, irg);
/* Constructs an EndExcept (interprocedural exceptional end) and records
   it in the graph. */
824 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
828 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
829 irg->end_except = res;
830 IRN_VRFY_IRG (res, irg);
/* Constructs a Break (mode_X, no predecessors). */
835 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
839 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
840 res = optimize_node(res);
841 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter: the interprocedural analogue of Proj on `arg`
   (the `proj` parameter is on an elided continuation line). */
846 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
851 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
852 res->attr.filter.proj = proj;
853 res->attr.filter.in_cg = NULL;
854 res->attr.filter.backedge = NULL;
857 assert(get_Proj_pred(res));
858 assert(get_nodes_block(get_Proj_pred(res)));
860 res = optimize_node(res);
861 IRN_VRFY_IRG(res, irg);
/* Returns the graph's NoMem node (body on elided lines). */
867 new_rd_NoMem (ir_graph *irg)
/* -------------------------------------------------------------------------
   new_r_* convenience wrappers: identical to the new_rd_* constructors
   above but without debug information (dbg_info == NULL).  Purely
   forwarding; closing braces sit on elided lines of this listing.
   ------------------------------------------------------------------------- */
873 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
874 return new_rd_Block(NULL, irg, arity, in);
876 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
877 return new_rd_Start(NULL, irg, block);
879 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
880 return new_rd_End(NULL, irg, block);
882 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
883 return new_rd_Jmp(NULL, irg, block);
885 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
886 return new_rd_Cond(NULL, irg, block, c);
888 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
889 ir_node *store, int arity, ir_node **in) {
890 return new_rd_Return(NULL, irg, block, store, arity, in);
892 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
893 ir_node *store, ir_node *obj) {
894 return new_rd_Raise(NULL, irg, block, store, obj);
896 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
897 ir_mode *mode, tarval *con) {
898 return new_rd_Const(NULL, irg, block, mode, con);
900 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
901 symconst_symbol value, symconst_kind symkind) {
902 return new_rd_SymConst(NULL, irg, block, value, symkind);
904 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
905 ir_node *objptr, int n_index, ir_node **index,
907 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
909 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
911 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
913 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
914 ir_node *callee, int arity, ir_node **in,
916 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
918 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
919 ir_node *op1, ir_node *op2, ir_mode *mode) {
920 return new_rd_Add(NULL, irg, block, op1, op2, mode);
922 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
923 ir_node *op1, ir_node *op2, ir_mode *mode) {
924 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
926 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
927 ir_node *op, ir_mode *mode) {
928 return new_rd_Minus(NULL, irg, block, op, mode);
930 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
931 ir_node *op1, ir_node *op2, ir_mode *mode) {
932 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
934 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
935 ir_node *memop, ir_node *op1, ir_node *op2) {
936 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
938 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
939 ir_node *memop, ir_node *op1, ir_node *op2) {
940 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
942 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
943 ir_node *memop, ir_node *op1, ir_node *op2) {
944 return new_rd_Div(NULL, irg, block, memop, op1, op2);
946 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
947 ir_node *memop, ir_node *op1, ir_node *op2) {
948 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
950 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
951 ir_node *op, ir_mode *mode) {
952 return new_rd_Abs(NULL, irg, block, op, mode);
954 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
955 ir_node *op1, ir_node *op2, ir_mode *mode) {
956 return new_rd_And(NULL, irg, block, op1, op2, mode);
958 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
959 ir_node *op1, ir_node *op2, ir_mode *mode) {
960 return new_rd_Or(NULL, irg, block, op1, op2, mode);
962 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
963 ir_node *op1, ir_node *op2, ir_mode *mode) {
964 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
966 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
967 ir_node *op, ir_mode *mode) {
968 return new_rd_Not(NULL, irg, block, op, mode);
970 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
971 ir_node *op1, ir_node *op2) {
972 return new_rd_Cmp(NULL, irg, block, op1, op2);
974 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
975 ir_node *op, ir_node *k, ir_mode *mode) {
976 return new_rd_Shl(NULL, irg, block, op, k, mode);
978 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
979 ir_node *op, ir_node *k, ir_mode *mode) {
980 return new_rd_Shr(NULL, irg, block, op, k, mode);
982 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
983 ir_node *op, ir_node *k, ir_mode *mode) {
984 return new_rd_Shrs(NULL, irg, block, op, k, mode);
986 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
987 ir_node *op, ir_node *k, ir_mode *mode) {
988 return new_rd_Rot(NULL, irg, block, op, k, mode);
990 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
991 ir_node *op, ir_mode *mode) {
992 return new_rd_Conv(NULL, irg, block, op, mode);
994 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
995 return new_rd_Cast(NULL, irg, block, op, to_tp);
997 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
998 ir_node **in, ir_mode *mode) {
999 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1001 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1002 ir_node *store, ir_node *adr, ir_mode *mode) {
1003 return new_rd_Load(NULL, irg, block, store, adr, mode);
1005 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1006 ir_node *store, ir_node *adr, ir_node *val) {
1007 return new_rd_Store(NULL, irg, block, store, adr, val);
1009 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1010 ir_node *size, type *alloc_type, where_alloc where) {
1011 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1013 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1014 ir_node *ptr, ir_node *size, type *free_type) {
1015 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1017 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1018 return new_rd_Sync(NULL, irg, block, arity, in);
1020 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1021 ir_mode *mode, long proj) {
1022 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1024 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1026 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1028 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1029 int arity, ir_node **in) {
1030 return new_rd_Tuple(NULL, irg, block, arity, in );
1032 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1033 ir_node *val, ir_mode *mode) {
1034 return new_rd_Id(NULL, irg, block, val, mode);
1036 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1037 return new_rd_Bad(irg);
1039 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1040 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1042 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1043 return new_rd_Unknown(irg, m);
1045 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1046 return new_rd_CallBegin(NULL, irg, block, callee);
1048 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1049 return new_rd_EndReg(NULL, irg, block);
1051 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1052 return new_rd_EndExcept(NULL, irg, block);
1054 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1055 return new_rd_Break(NULL, irg, block);
1057 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1058 ir_mode *mode, long proj) {
1059 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1061 INLINE ir_node *new_r_NoMem (ir_graph *irg) {
1062 return new_rd_NoMem(irg);
1066 /** ********************/
1067 /** public interfaces */
1068 /** construction tools */
1072 * - create a new Start node in the current block
1074 * @return s - pointer to the created Start node
/* Constructs a Start node in current_ir_graph's current block. */
1079 new_d_Start (dbg_info* db)
1083 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1084 op_Start, mode_T, 0, NULL);
1085 /* res->attr.start.irg = current_ir_graph; */
1087 res = optimize_node(res);
1088 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node in current_ir_graph's current block (arity -1). */
1093 new_d_End (dbg_info* db)
1096 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1097 op_End, mode_X, -1, NULL);
1098 res = optimize_node(res);
1099 IRN_VRFY_IRG(res, current_ir_graph);
1104 /* Constructs a Block with a fixed number of predecessors.
1105 Does set current_block. Can be used with automatic Phi
1106 node construction. */
1108 new_d_Block (dbg_info* db, int arity, ir_node **in)
1112 bool has_unknown = false;
1114 res = new_rd_Block(db, current_ir_graph, arity, in);
1116 /* Create and initialize array for Phi-node construction. */
1117 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr: one slot per local variable (n_loc), zero-initialized; this
   is the per-block value table that get_value/set_value operate on. */
1118 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1119 current_ir_graph->n_loc);
1120 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes (loop body setting has_unknown is
   on elided lines); blocks with Unknown predecessors are not optimized. */
1123 for (i = arity-1; i >= 0; i--)
1124 if (get_irn_op(in[i]) == op_Unknown) {
1129 if (!has_unknown) res = optimize_node(res);
/* Make the new block the implicit target of subsequent constructors. */
1130 current_ir_graph->current_block = res;
1132 IRN_VRFY_IRG(res, current_ir_graph);
1137 /* ***********************************************************************/
1138 /* Methods necessary for automatic Phi node creation */
1140 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1141 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1142 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1143 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1145 Call Graph: ( A ---> B == A "calls" B)
1147 get_value mature_immBlock
1155 get_r_value_internal |
1159 new_rd_Phi0 new_rd_Phi_in
1161 * *************************************************************************** */
1163 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi for a not-yet-mature block; its operands are filled in
   when the block matures.  Deliberately not optimized. */
1164 static INLINE ir_node *
1165 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1169 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1170 IRN_VRFY_IRG(res, irg);
1174 /* There are two implementations of the Phi node construction. The first
1175 is faster, but does not work for blocks with more than 2 predecessors.
1176 The second works always but is slower and causes more unnecessary Phi
1178 Select the implementations by the following preprocessor flag set in
1180 #if USE_FAST_PHI_CONSTRUCTION
1182 /* This is a stack used for allocating and deallocating nodes in
1183 new_rd_Phi_in. The original implementation used the obstack
1184 to model this stack, now it is explicit. This reduces side effects.
1186 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack (malloc'ed struct + flexible
   array for the node pointers). */
1187 INLINE Phi_in_stack *
1188 new_Phi_in_stack(void) {
1191 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1193 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's array (the free() of the struct itself is on an
   elided line). */
1200 free_Phi_in_stack(Phi_in_stack *s) {
1201 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the graph's Phi_in stack for reuse:
   append when at the end of the array, otherwise overwrite in place. */
1205 free_to_Phi_in_stack(ir_node *phi) {
1206 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1207 current_ir_graph->Phi_in_stack->pos)
1208 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1210 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1212 (current_ir_graph->Phi_in_stack->pos)++;
/* Either allocates a fresh Phi node or pops and re-initializes one from
   the stack. */
1215 static INLINE ir_node *
1216 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1217 int arity, ir_node **in) {
1219 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1220 int pos = current_ir_graph->Phi_in_stack->pos;
1224 /* We need to allocate a new node */
/* NOTE(review): `db` is not a parameter of this function and no local
   declaration is visible in this listing -- verify it resolves (it may be
   a bug; NULL would be the expected debug info here). */
1225 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1226 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1228 /* reuse the old node and initialize it again. */
1231 assert (res->kind == k_ir_node);
1232 assert (res->op == op_Phi);
1236 assert (arity >= 0);
1237 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved (block pointer slot), hence arity+1 and the copy to
   &res->in[1]. */
1238 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1240 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1242 (current_ir_graph->Phi_in_stack->pos)--;
1248 /* Creates a Phi node with a given, fixed array **in of predecessors.
1249 If the Phi node is unnecessary, as the same value reaches the block
1250 through all control flow paths, it is eliminated and the value
1251 returned directly. This constructor is only intended for use in
1252 the automatic Phi node generation triggered by get_value or mature.
1253 The implementation is quite tricky and depends on the fact, that
1254 the nodes are allocated on a stack:
1255 The in array contains predecessors and NULLs. The NULLs appear,
1256 if get_r_value_internal, that computed the predecessors, reached
1257 the same block on two paths. In this case the same value reaches
1258 this block on both paths, there is no definition in between. We need
1259 not allocate a Phi where these path's merge, but we have to communicate
1260 this fact to the caller. This happens by returning a pointer to the
1261 node the caller _will_ allocate. (Yes, we predict the address. We can
1262 do so because the nodes are allocated on the obstack.) The caller then
1263 finds a pointer to itself and, when this routine is called again,
1266 static INLINE ir_node *
1267 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1270 ir_node *res, *known;
1272 /* Allocate a new node on the obstack. This can return a node to
1273 which some of the pointers in the in-array already point.
1274 Attention: the constructor copies the in array, i.e., the later
1275 changes to the array in this routine do not affect the
1276 constructed node! If the in array contains NULLs, there will be
1277 missing predecessors in the returned node. Is this a possible
1278 internal state of the Phi node generation? */
1279 #if USE_EXPLICIT_PHI_IN_STACK
1280 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1282 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1283 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1286 /* The in-array can contain NULLs. These were returned by
1287 get_r_value_internal if it reached the same block/definition on a
1288 second path. The NULLs are replaced by the node itself to
1289 simplify the test in the next loop. */
1290 for (i = 0; i < ins; ++i) {
1295 /* This loop checks whether the Phi has more than one predecessor.
1296 If so, it is a real Phi node and we break the loop. Else the Phi
1297 node merges the same definition on several paths and therefore is
1299 for (i = 0; i < ins; ++i)
1301 if (in[i] == res || in[i] == known) continue;
1309 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1311 #if USE_EXPLICIT_PHI_IN_STACK
1312 free_to_Phi_in_stack(res);
1314 obstack_free (current_ir_graph->obst, res);
1318 res = optimize_node (res);
1319 IRN_VRFY_IRG(res, irg);
1322 /* return the pointer to the Phi node. This node might be deallocated! */
1327 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1330 allocates and returns this node. The routine called to allocate the
1331 node might optimize it away and return a real value, or even a pointer
1332 to a deallocated Phi node on top of the obstack!
1333 This function is called with an in-array of proper size. **/
/* Collects the value at local-variable slot `pos` (mode `mode`) from all
   `ins` predecessors of `block` into `nin`, then builds (or elides) the
   Phi via new_rd_Phi_in and records the result in the block's value table. */
1335 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1337 ir_node *prevBlock, *res;
1340 /* This loop goes to all predecessor blocks of the block the Phi node is in
1341 and there finds the operands of the Phi node by calling
1342 get_r_value_internal. */
/* Note: node in[] arrays are 1-based here (in[0] is reserved), hence the
   i = 1..ins loop writing nin[i-1]. */
1343 for (i = 1; i <= ins; ++i) {
1344 assert (block->in[i]);
1345 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1347 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1350 /* After collecting all predecessors into the array nin a new Phi node
1351 with these predecessors is created. This constructor contains an
1352 optimization: If all predecessors of the Phi node are identical it
1353 returns the only operand instead of a new Phi node. If the value
1354 passes two different control flow edges without being defined, and
1355 this is the second path treated, a pointer to the node that will be
1356 allocated for the first path (recursion) is returned. We already
1357 know the address of this node, as it is the next node to be allocated
1358 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1359 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1361 /* Now we now the value for "pos" and can enter it in the array with
1362 all known local variables. Attention: this might be a pointer to
1363 a node, that later will be allocated!!! See new_rd_Phi_in.
1364 If this is called in mature, after some set_value in the same block,
1365 the proper value must not be overwritten:
1367 get_value (makes Phi0, put's it into graph_arr)
1368 set_value (overwrites Phi0 in graph_arr)
1369 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1372 if (!block->attr.block.graph_arr[pos]) {
1373 block->attr.block.graph_arr[pos] = res;
1375 /* printf(" value already computed by %s\n",
1376 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1382 /* This function returns the last definition of a variable. In case
1383 this variable was last defined in a previous block, Phi nodes are
1384 inserted. If the part of the firm graph containing the definition
1385 is not yet constructed, a dummy Phi node is returned. */
1387 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1390 /* There are 4 cases to treat.
1392 1. The block is not mature and we visit it the first time. We can not
1393 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1394 predecessors is returned. This node is added to the linked list (field
1395 "link") of the containing block to be completed when this block is
1396 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1399 2. The value is already known in this block, graph_arr[pos] is set and we
1400 visit the block the first time. We can return the value without
1401 creating any new nodes.
1403 3. The block is mature and we visit it the first time. A Phi node needs
1404 to be created (phi_merge). If the Phi is not needed, as all it's
1405 operands are the same value reaching the block through different
1406 paths, it's optimized away and the value itself is returned.
1408 4. The block is mature, and we visit it the second time. Now two
1409 subcases are possible:
1410 * The value was computed completely the last time we were here. This
1411 is the case if there is no loop. We can return the proper value.
1412 * The recursion that visited this node and set the flag did not
1413 return yet. We are computing a value in a loop and need to
1414 break the recursion without knowing the result yet.
1415 @@@ strange case. Straight forward we would create a Phi before
1416 starting the computation of it's predecessors. In this case we will
1417 find a Phi here in any case. The problem is that this implementation
1418 only creates a Phi after computing the predecessors, so that it is
1419 hard to compute self references of this Phi. @@@
1420 There is no simple check for the second subcase. Therefore we check
1421 for a second visit and treat all such cases as the second subcase.
1422 Anyways, the basic situation is the same: we reached a block
1423 on two paths without finding a definition of the value: No Phi
1424 nodes are needed on both paths.
1425 We return this information "Two paths, no Phi needed" by a very tricky
1426 implementation that relies on the fact that an obstack is a stack and
1427 will return a node with the same address on different allocations.
1428 Look also at phi_merge and new_rd_phi_in to understand this.
1429 @@@ Unfortunately this does not work, see testprogram
1430 three_cfpred_example.
1434 /* case 4 -- already visited. */
/* NULL signals "second visit, definition not yet known" to the caller. */
1435 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1437 /* visited the first time */
1438 set_irn_visited(block, get_irg_visited(current_ir_graph));
1440 /* Get the local valid value */
1441 res = block->attr.block.graph_arr[pos];
1443 /* case 2 -- If the value is actually computed, return it. */
1444 if (res) return res;
1446 if (block->attr.block.matured) { /* case 3 */
1448 /* The Phi has the same amount of ins as the corresponding block. */
1449 int ins = get_irn_arity(block);
1451 NEW_ARR_A (ir_node *, nin, ins);
1453 /* Phi merge collects the predecessors and then creates a node. */
1454 res = phi_merge (block, pos, mode, nin, ins);
1456 } else { /* case 1 */
1457 /* The block is not mature, we don't know how many in's are needed. A Phi
1458 with zero predecessors is created. Such a Phi node is called Phi0
1459 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1460 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1462 The Phi0 has to remember the pos of it's internal value. If the real
1463 Phi is computed, pos is used to update the array with the local
1466 res = new_rd_Phi0 (current_ir_graph, block, mode);
1467 res->attr.phi0_pos = pos;
/* Thread the Phi0 onto the block's singly-linked list via `link`. */
1468 res->link = block->link;
1472 /* If we get here, the frontend missed a use-before-definition error */
1475 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1476 assert (mode->code >= irm_F && mode->code <= irm_P);
1477 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1478 tarval_mode_null[mode->code]);
1481 /* The local valid value is available now. */
1482 block->attr.block.graph_arr[pos] = res;
1490 it starts the recursion. This causes an Id at the entry of
1491 every block that has no definition of the value! **/
1493 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack implementation: when the explicit Phi-in stack is enabled
   elsewhere, these stubs keep the interface alive without state. */
1495 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1496 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Variant of new_rd_Phi_in that additionally takes the Phi0 placeholder
   `phi0` so self-references through the not-yet-exchanged Phi0 can be
   folded immediately. Returns either a real Phi, a single known operand,
   or an undefined value for unreachable code. */
1499 static INLINE ir_node *
1500 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1501 ir_node **in, int ins, ir_node *phi0)
1504 ir_node *res, *known;
1506 /* Allocate a new node on the obstack. The allocation copies the in
1508 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1509 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1511 /* This loop checks whether the Phi has more than one predecessor.
1512 If so, it is a real Phi node and we break the loop. Else the
1513 Phi node merges the same definition on several paths and therefore
1514 is not needed. Don't consider Bad nodes! */
1516 for (i=0; i < ins; ++i)
1520 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1522 /* Optimize self referencing Phis: We can't detect them yet properly, as
1523 they still refer to the Phi0 they will replace. So replace right now. */
1524 if (phi0 && in[i] == phi0) in[i] = res;
1526 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1534 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the speculative Phi; it is the top of the obstack. */
1537 obstack_free (current_ir_graph->obst, res);
1538 if (is_Phi(known)) {
1539 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1540 order, an enclosing Phi node may become superfluous. */
1541 res = optimize_in_place_2(known);
1542 if (res != known) { exchange(known, res); }
1547 /* An undefined value, e.g., in unreachable code. */
1551 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1552 IRN_VRFY_IRG(res, irg);
1553 /* Memory Phis in endless loops must be kept alive.
1554 As we can't distinguish these easily we keep all of them alive. */
1555 if ((res->op == op_Phi) && (mode == mode_M))
1556 add_End_keepalive(irg->end, res);
1563 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1565 #if PRECISE_EXC_CONTEXT
1567 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1569 /* Construct a new frag_array for node n.
1570 Copy the content from the current graph_arr of the corresponding block:
1571 this is the current state.
1572 Set ProjM(n) as current memory state.
1573 Further the last entry in frag_arr of current block points to n. This
1574 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1576 static INLINE ir_node ** new_frag_arr (ir_node *n)
/* Snapshot the current block's graph_arr into a fresh obstack array. */
1581 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1582 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1583 sizeof(ir_node *)*current_ir_graph->n_loc);
1585 /* turn off optimization before allocating Proj nodes, as res isn't
1587 opt = get_opt_optimize(); set_optimize(0);
1588 /* Here we rely on the fact that all frag ops have Memory as first result! */
1589 if (get_irn_op(n) == op_Call)
1590 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* All other fragile ops share the same memory-Proj number; assert it. */
1592 assert((pn_Quot_M == pn_DivMod_M) &&
1593 (pn_Quot_M == pn_Div_M) &&
1594 (pn_Quot_M == pn_Mod_M) &&
1595 (pn_Quot_M == pn_Load_M) &&
1596 (pn_Quot_M == pn_Store_M) &&
1597 (pn_Quot_M == pn_Alloc_M) );
1598 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Last slot of the block's graph_arr links to the newest fragile op. */
1602 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1607 * returns the frag_arr from a node
1609 static INLINE ir_node **
1610 get_frag_arr (ir_node *n) {
/* Dispatch on the opcode to reach the op-specific exception attribute.
   NOTE(review): the `case iro_...:` labels are elided in this view; each
   visible return belongs to one fragile opcode (Call, Alloc, Load, Store,
   and the shared `except` attribute for the arithmetic ops). */
1611 switch (get_irn_opcode(n)) {
1613 return n->attr.call.exc.frag_arr;
1615 return n->attr.a.exc.frag_arr;
1617 return n->attr.load.exc.frag_arr;
1619 return n->attr.store.exc.frag_arr;
1621 return n->attr.except.frag_arr;
/* Stores `val` into slot `pos` of `frag_arr` unless already set, then
   propagates along the chain of fragile ops (linked through the last
   array slot, see new_frag_arr). Two implementations are visible here:
   a recursive one and an iteration-bounded loop variant -- presumably
   selected by elided preprocessor conditionals (TODO confirm). */
1626 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1628 if (!frag_arr[pos]) frag_arr[pos] = val;
1629 if (frag_arr[current_ir_graph->n_loc - 1]) {
1630 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1631 assert(arr != frag_arr && "Endless recursion detected");
1632 set_frag_value(arr, pos, val);
/* Loop variant: cap chain walking at 1000 hops instead of recursing. */
1637 for (i = 0; i < 1000; ++i) {
1638 if (!frag_arr[pos]) {
1639 frag_arr[pos] = val;
1641 if (frag_arr[current_ir_graph->n_loc - 1]) {
1642 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1648 assert(0 && "potential endless recursion");
/* Returns the value of variable `pos` as seen after fragile op `cfOp`
   in `block`, consulting the op's frag_arr snapshot first and falling
   back to the normal SSA lookup. Writes the result back through
   set_frag_value so later fragile ops see it. */
1653 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1657 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1659 frag_arr = get_frag_arr(cfOp);
1660 res = frag_arr[pos];
1662 if (block->attr.block.graph_arr[pos]) {
1663 /* There was a set_value after the cfOp and no get_value before that
1664 set_value. We must build a Phi node now. */
1665 if (block->attr.block.matured) {
1666 int ins = get_irn_arity(block);
1668 NEW_ARR_A (ir_node *, nin, ins);
1669 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder (see phi_merge). */
1671 res = new_rd_Phi0 (current_ir_graph, block, mode);
1672 res->attr.phi0_pos = pos;
1673 res->link = block->link;
1677 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1678 but this should be better: (remove comment if this works) */
1679 /* It's a Phi, we can write this into all graph_arrs with NULL */
1680 set_frag_value(block->attr.block.graph_arr, pos, res);
1682 res = get_r_value_internal(block, pos, mode);
1683 set_frag_value(block->attr.block.graph_arr, pos, res);
1691 computes the predecessors for the real phi node, and then
1692 allocates and returns this node. The routine called to allocate the
1693 node might optimize it away and return a real value.
1694 This function must be called with an in-array of proper size. **/
1696 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1698 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1701 /* If this block has no value at pos create a Phi0 and remember it
1702 in graph_arr to break recursions.
1703 Else we may not set graph_arr as there a later value is remembered. */
1705 if (!block->attr.block.graph_arr[pos]) {
1706 if (block == get_irg_start_block(current_ir_graph)) {
1707 /* Collapsing to Bad tarvals is no good idea.
1708 So we call a user-supplied routine here that deals with this case as
1709 appropriate for the given language. Sadly the only help we can give
1710 here is the position.
1712 Even if all variables are defined before use, it can happen that
1713 we get to the start block, if a cond has been replaced by a tuple
1714 (bad, jmp). In this case we call the function needlessly, eventually
1715 generating a non-existent error.
1716 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1719 if (default_initialize_local_variable)
1720 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1722 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1723 /* We don't need to care about exception ops in the start block.
1724 There are none by definition. */
1725 return block->attr.block.graph_arr[pos];
/* Not the start block: install a Phi0 placeholder to break recursion. */
1727 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1728 block->attr.block.graph_arr[pos] = phi0;
1729 #if PRECISE_EXC_CONTEXT
1730 if (get_opt_precise_exc_context()) {
1731 /* Set graph_arr for fragile ops. Also here we should break recursion.
1732 We could choose a cyclic path through a cfop. But the recursion would
1733 break at some point. */
1734 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1740 /* This loop goes to all predecessor blocks of the block the Phi node
1741 is in and there finds the operands of the Phi node by calling
1742 get_r_value_internal. */
1743 for (i = 1; i <= ins; ++i) {
1744 prevCfOp = skip_Proj(block->in[i]);
1746 if (is_Bad(prevCfOp)) {
1747 /* In case a Cond has been optimized we would get right to the start block
1748 with an invalid definition. */
1749 nin[i-1] = new_Bad();
1752 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1754 if (!is_Bad(prevBlock)) {
1755 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: read the value from its frag_arr snapshot. */
1756 if (get_opt_precise_exc_context() &&
1757 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1758 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1759 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1762 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1764 nin[i-1] = new_Bad();
1768 /* We want to pass the Phi0 node to the constructor: this finds additional
1769 optimization possibilities.
1770 The Phi0 node either is allocated in this function, or it comes from
1771 a former call to get_r_value_internal. In this case we may not yet
1772 exchange phi0, as this is done in mature_immBlock. */
1774 phi0_all = block->attr.block.graph_arr[pos];
1775 if (!((get_irn_op(phi0_all) == op_Phi) &&
1776 (get_irn_arity(phi0_all) == 0) &&
1777 (get_nodes_block(phi0_all) == block)))
1783 /* After collecting all predecessors into the array nin a new Phi node
1784 with these predecessors is created. This constructor contains an
1785 optimization: If all predecessors of the Phi node are identical it
1786 returns the only operand instead of a new Phi node. */
1787 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1789 /* In case we allocated a Phi0 node at the beginning of this procedure,
1790 we need to exchange this Phi0 with the real Phi. */
1792 exchange(phi0, res);
1793 block->attr.block.graph_arr[pos] = res;
1794 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1795 only an optimization. */
1801 /* This function returns the last definition of a variable. In case
1802 this variable was last defined in a previous block, Phi nodes are
1803 inserted. If the part of the firm graph containing the definition
1804 is not yet constructed, a dummy Phi node is returned. */
1806 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1809 /* There are 4 cases to treat.
1811 1. The block is not mature and we visit it the first time. We can not
1812 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1813 predecessors is returned. This node is added to the linked list (field
1814 "link") of the containing block to be completed when this block is
1815 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1818 2. The value is already known in this block, graph_arr[pos] is set and we
1819 visit the block the first time. We can return the value without
1820 creating any new nodes.
1822 3. The block is mature and we visit it the first time. A Phi node needs
1823 to be created (phi_merge). If the Phi is not needed, as all it's
1824 operands are the same value reaching the block through different
1825 paths, it's optimized away and the value itself is returned.
1827 4. The block is mature, and we visit it the second time. Now two
1828 subcases are possible:
1829 * The value was computed completely the last time we were here. This
1830 is the case if there is no loop. We can return the proper value.
1831 * The recursion that visited this node and set the flag did not
1832 return yet. We are computing a value in a loop and need to
1833 break the recursion. This case only happens if we visited
1834 the same block with phi_merge before, which inserted a Phi0.
1835 So we return the Phi0.
1838 /* case 4 -- already visited. */
1839 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1840 /* As phi_merge allocates a Phi0 this value is always defined. Here
1841 is the critical difference of the two algorithms. */
1842 assert(block->attr.block.graph_arr[pos]);
1843 return block->attr.block.graph_arr[pos];
1846 /* visited the first time */
1847 set_irn_visited(block, get_irg_visited(current_ir_graph));
1849 /* Get the local valid value */
1850 res = block->attr.block.graph_arr[pos];
1852 /* case 2 -- If the value is actually computed, return it. */
1853 if (res) { return res; };
1855 if (block->attr.block.matured) { /* case 3 */
1857 /* The Phi has the same amount of ins as the corresponding block. */
1858 int ins = get_irn_arity(block);
1860 NEW_ARR_A (ir_node *, nin, ins);
1862 /* Phi merge collects the predecessors and then creates a node. */
1863 res = phi_merge (block, pos, mode, nin, ins);
1865 } else { /* case 1 */
1866 /* The block is not mature, we don't know how many in's are needed. A Phi
1867 with zero predecessors is created. Such a Phi node is called Phi0
1868 node. The Phi0 is then added to the list of Phi0 nodes in this block
1869 to be matured by mature_immBlock later.
1870 The Phi0 has to remember the pos of it's internal value. If the real
1871 Phi is computed, pos is used to update the array with the local
1873 res = new_rd_Phi0 (current_ir_graph, block, mode);
1874 res->attr.phi0_pos = pos;
1875 res->link = block->link;
1879 /* If we get here, the frontend missed a use-before-definition error */
1882 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1883 assert (mode->code >= irm_F && mode->code <= irm_P);
1884 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1885 get_mode_null(mode));
1888 /* The local valid value is available now. */
1889 block->attr.block.graph_arr[pos] = res;
1894 #endif /* USE_FAST_PHI_CONSTRUCTION */
1896 /* ************************************************************************** */
1898 /** Finalize a Block node, when all control flows are known. */
1899 /** Acceptable parameters are only Block nodes. */
1901 mature_immBlock (ir_node *block)
1908 assert (get_irn_opcode(block) == iro_Block);
1909 /* @@@ should be commented in
1910 assert (!get_Block_matured(block) && "Block already matured"); */
1912 if (!get_Block_matured(block)) {
/* in[0] is the block's own slot, hence the -1 for the real arity. */
1913 ins = ARR_LEN (block->in)-1;
1914 /* Fix block parameters */
1915 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1917 /* An array for building the Phi nodes. */
1918 NEW_ARR_A (ir_node *, nin, ins);
1920 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each Phi0 in the block's link list is upgraded to a real Phi (or a
   folded value) via phi_merge and exchanged in place. */
1922 for (n = block->link; n; n=next) {
1923 inc_irg_visited(current_ir_graph);
1925 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1928 block->attr.block.matured = 1;
1930 /* Now, as the block is a finished firm node, we can optimize it.
1931 Since other nodes have been allocated since the block was created
1932 we can not free the node on the obstack. Therefore we have to call
1934 Unfortunately the optimization does not change a lot, as all allocated
1935 nodes refer to the unoptimized node.
1936 We can call _2, as global cse has no effect on blocks. */
1937 block = optimize_in_place_2(block);
1938 IRN_VRFY_IRG(block, current_ir_graph);
/* ---- new_d_* wrappers (part 1): each forwards to the corresponding
   new_rd_* constructor using current_ir_graph and its current_block
   (Const/Const_type/SymConst use start_block instead). ---- */
1943 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1945 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1950 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1952 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1957 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1959 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1965 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1967 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1972 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1974 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* defaultProj: marks the Cond as fragmentary and records the default case. */
1979 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1982 assert(arg->op == op_Cond);
1983 arg->attr.c.kind = fragmentary;
1984 arg->attr.c.default_proj = max_proj;
1985 res = new_Proj (arg, mode_X, max_proj);
1990 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1992 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1997 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1999 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2003 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2005 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2010 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2012 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2019 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2025 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2027 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2032 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2034 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2039 * allocate the frag array
/* Lazily attaches a frag_arr to a freshly built fragile node `res`,
   but only while the graph is still being built, the node was not
   optimized into a different op, and no array exists yet (cse hit). */
2041 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2042 if (get_opt_precise_exc_context()) {
2043 if ((current_ir_graph->phase_state == phase_building) &&
2044 (get_irn_op(res) == op) && /* Could be optimized away. */
2045 !*frag_store) /* Could be a cse where the arr is already set. */ {
2046 *frag_store = new_frag_arr(res);
/* ---- Division-family constructors: build the node pinned, then attach
   a frag_arr for precise exception context if enabled. ---- */
2053 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2056 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2058 res->attr.except.pin_state = op_pin_state_pinned;
2059 #if PRECISE_EXC_CONTEXT
2060 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2067 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2070 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2072 res->attr.except.pin_state = op_pin_state_pinned;
2073 #if PRECISE_EXC_CONTEXT
2074 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2081 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2084 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2086 res->attr.except.pin_state = op_pin_state_pinned;
2087 #if PRECISE_EXC_CONTEXT
2088 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2095 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2098 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2100 res->attr.except.pin_state = op_pin_state_pinned;
2101 #if PRECISE_EXC_CONTEXT
2102 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* ---- new_d_* wrappers (part 2): forwarders to new_rd_* in the current
   block; Call/Load/Store/Alloc additionally allocate a frag_arr when
   PRECISE_EXC_CONTEXT is enabled. ---- */
2109 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2111 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2116 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2118 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2123 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2125 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2130 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2132 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2137 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2139 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2144 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2146 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2151 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2153 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2158 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2160 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2165 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2167 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2172 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2174 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2179 new_d_Jmp (dbg_info* db)
2181 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2185 new_d_Cond (dbg_info* db, ir_node *c)
2187 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2191 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2195 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2196 store, callee, arity, in, tp);
2197 #if PRECISE_EXC_CONTEXT
2198 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2205 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2207 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2212 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2214 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2219 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2222 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2224 #if PRECISE_EXC_CONTEXT
2225 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2232 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2235 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2237 #if PRECISE_EXC_CONTEXT
2238 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2245 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2249 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2250 store, size, alloc_type, where);
2251 #if PRECISE_EXC_CONTEXT
2252 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2259 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2261 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2262 store, ptr, size, free_type);
2266 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2267 /* GL: objptr was called frame before. Frame was a bad choice for the name
2268 as the operand could as well be a pointer to a dynamic object. */
2270 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2271 store, objptr, 0, NULL, ent);
2275 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2277 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2278 store, objptr, n_index, index, sel);
2282 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2284 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2285 store, objptr, ent));
2289 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2291 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2296 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2298 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2303 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2305 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2313 return __new_d_Bad();
2317 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2319 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2324 new_d_Unknown (ir_mode *m)
2326 return new_rd_Unknown(current_ir_graph, m);
2330 new_d_CallBegin (dbg_info *db, ir_node *call)
2333 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2338 new_d_EndReg (dbg_info *db)
2341 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2346 new_d_EndExcept (dbg_info *db)
2349 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2354 new_d_Break (dbg_info *db)
2356 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2360 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2362 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2369 return __new_d_NoMem();
2372 /* ********************************************************************* */
2373 /* Comfortable interface with automatic Phi node construction. */
2374 /* (Uses also constructors of ?? interface, except new_Block.) */
2375 /* ********************************************************************* */
2377 /* * Block construction **/
2378 /* immature Block without predecessors */
/* Creates an immature Block (arity -1, predecessors added later via
   add_immBlock_pred), makes it the current block, and allocates the
   zero-initialized graph_arr used by Phi-node construction. */
2379 ir_node *new_d_immBlock (dbg_info* db) {
2382 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2383 /* creates a new dynamic in-array as length of in is -1 */
2384 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2385 current_ir_graph->current_block = res;
2386 res->attr.block.matured = 0;
2387 res->attr.block.dead = 0;
2388 /* res->attr.block.exc = exc_normal; */
2389 /* res->attr.block.handler_entry = 0; */
2390 res->attr.block.irg = current_ir_graph;
2391 res->attr.block.backedge = NULL;
2392 res->attr.block.in_cg = NULL;
2393 res->attr.block.cg_backedge = NULL;
2394 set_Block_block_visited(res, 0);
2396 /* Create and initialize array for Phi-node construction. */
2397 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2398 current_ir_graph->n_loc);
2399 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2401 /* Immature block may not be optimized! */
2402 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper: immature block without debug info. */
2408 new_immBlock (void) {
2409 return new_d_immBlock(NULL);
2412 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor `jmp` to the still-immature `block`.
   Maturing the block (mature_immBlock) freezes the predecessor list. */
2414 add_immBlock_pred (ir_node *block, ir_node *jmp)
2416 if (block->attr.block.matured) {
2417 assert(0 && "Error: Block already matured!\n");
2420 assert(jmp != NULL);
2421 ARR_APP1(ir_node *, block->in, jmp);
/* changing the current block */
/* Makes `target` the block into which subsequent new_d_* nodes are placed. */
2427 set_cur_block (ir_node *target)
2429 current_ir_graph->current_block = target;
2432 /* ************************ */
2433 /* parameter administration */
2435 /* get a value from the parameter array from the current block by its index */
2437 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2439 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2440 inc_irg_visited(current_ir_graph);
2442 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2444 /* get a value from the parameter array from the current block by its index */
2446 get_value (int pos, ir_mode *mode)
2448 return get_d_value(NULL, pos, mode);
2451 /* set a value at position pos in the parameter array from the current block */
2453 set_value (int pos, ir_node *value)
2455 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2456 assert(pos+1 < current_ir_graph->n_loc);
2457 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2460 /* get the current store */
2464 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2465 /* GL: one could call get_value instead */
2466 inc_irg_visited(current_ir_graph);
2467 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2470 /* set the current store */
2472 set_store (ir_node *store)
2474 /* GL: one could call set_value instead */
2475 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2476 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2480 keep_alive (ir_node *ka)
2482 add_End_keepalive(current_ir_graph->end, ka);
2485 /** Useful access routines **/
2486 /* Returns the current block of the current graph. To set the current
2487 block use set_cur_block. */
2488 ir_node *get_cur_block() {
2489 return get_irg_current_block(current_ir_graph);
2492 /* Returns the frame type of the current graph */
2493 type *get_cur_frame_type() {
2494 return get_irg_frame_type(current_ir_graph);
2498 /* ********************************************************************* */
2501 /* call once for each run of the library */
2503 init_cons (default_initialize_local_variable_func_t *func)
2505 default_initialize_local_variable = func;
2508 /* call for each graph */
2510 finalize_cons (ir_graph *irg) {
2511 irg->phase_state = phase_high;
2515 ir_node *new_Block(int arity, ir_node **in) {
2516 return new_d_Block(NULL, arity, in);
2518 ir_node *new_Start (void) {
2519 return new_d_Start(NULL);
2521 ir_node *new_End (void) {
2522 return new_d_End(NULL);
2524 ir_node *new_Jmp (void) {
2525 return new_d_Jmp(NULL);
2527 ir_node *new_Cond (ir_node *c) {
2528 return new_d_Cond(NULL, c);
2530 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2531 return new_d_Return(NULL, store, arity, in);
2533 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2534 return new_d_Raise(NULL, store, obj);
2536 ir_node *new_Const (ir_mode *mode, tarval *con) {
2537 return new_d_Const(NULL, mode, con);
2540 ir_node *new_Const_type(tarval *con, type *tp) {
2541 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2544 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2545 return new_d_SymConst(NULL, value, kind);
2547 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2548 return new_d_simpleSel(NULL, store, objptr, ent);
2550 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2552 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2554 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2555 return new_d_InstOf (NULL, store, objptr, ent);
2557 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2559 return new_d_Call(NULL, store, callee, arity, in, tp);
2561 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2562 return new_d_Add(NULL, op1, op2, mode);
2564 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2565 return new_d_Sub(NULL, op1, op2, mode);
2567 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2568 return new_d_Minus(NULL, op, mode);
2570 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2571 return new_d_Mul(NULL, op1, op2, mode);
2573 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2574 return new_d_Quot(NULL, memop, op1, op2);
2576 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2577 return new_d_DivMod(NULL, memop, op1, op2);
2579 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2580 return new_d_Div(NULL, memop, op1, op2);
2582 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2583 return new_d_Mod(NULL, memop, op1, op2);
2585 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2586 return new_d_Abs(NULL, op, mode);
2588 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2589 return new_d_And(NULL, op1, op2, mode);
2591 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2592 return new_d_Or(NULL, op1, op2, mode);
2594 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2595 return new_d_Eor(NULL, op1, op2, mode);
2597 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2598 return new_d_Not(NULL, op, mode);
2600 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2601 return new_d_Shl(NULL, op, k, mode);
2603 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2604 return new_d_Shr(NULL, op, k, mode);
2606 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2607 return new_d_Shrs(NULL, op, k, mode);
2609 #define new_Rotate new_Rot
2610 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2611 return new_d_Rot(NULL, op, k, mode);
2613 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2614 return new_d_Cmp(NULL, op1, op2);
2616 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2617 return new_d_Conv(NULL, op, mode);
2619 ir_node *new_Cast (ir_node *op, type *to_tp) {
2620 return new_d_Cast(NULL, op, to_tp);
2622 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2623 return new_d_Phi(NULL, arity, in, mode);
2625 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2626 return new_d_Load(NULL, store, addr, mode);
2628 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2629 return new_d_Store(NULL, store, addr, val);
2631 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2632 where_alloc where) {
2633 return new_d_Alloc(NULL, store, size, alloc_type, where);
2635 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2637 return new_d_Free(NULL, store, ptr, size, free_type);
2639 ir_node *new_Sync (int arity, ir_node **in) {
2640 return new_d_Sync(NULL, arity, in);
2642 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2643 return new_d_Proj(NULL, arg, mode, proj);
2645 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2646 return new_d_defaultProj(NULL, arg, max_proj);
2648 ir_node *new_Tuple (int arity, ir_node **in) {
2649 return new_d_Tuple(NULL, arity, in);
2651 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2652 return new_d_Id(NULL, val, mode);
2654 ir_node *new_Bad (void) {
2657 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2658 return new_d_Confirm (NULL, val, bound, cmp);
2660 ir_node *new_Unknown(ir_mode *m) {
2661 return new_d_Unknown(m);
2663 ir_node *new_CallBegin (ir_node *callee) {
2664 return new_d_CallBegin(NULL, callee);
2666 ir_node *new_EndReg (void) {
2667 return new_d_EndReg(NULL);
2669 ir_node *new_EndExcept (void) {
2670 return new_d_EndExcept(NULL);
2672 ir_node *new_Break (void) {
2673 return new_d_Break(NULL);
2675 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2676 return new_d_Filter(NULL, arg, mode, proj);
2678 ir_node *new_NoMem (void) {
2679 return new_d_NoMem();