3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
41 #if USE_EXPLICIT_PHI_IN_STACK
42 /* A stack needed for the automatic Phi node construction in constructor
43    Phi_in.  Redefinition in irgraph.c!! */
/* Opaque here; the struct body lives in irgraph.c (see comment above). */
48 typedef struct Phi_in_stack Phi_in_stack;
51 /* when we need verifying */
/* Node verification hook: compiled out in one configuration (empty macro),
   forwards to irn_vrfy_irg() in the other. */
53 # define IRN_VRFY_IRG(res, irg)
55 # define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
59  * language dependent initialization variable
/* Frontend-supplied callback used to initialize otherwise-undefined local
   variables during SSA construction; NULL means no language hook installed. */
61 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
63 /* -------------------------------------------- */
64 /* private interfaces, for professional use only */
65 /* -------------------------------------------- */
67 /* Constructs a Block with a fixed number of predecessors.
68    Does not set current_block.  Can not be used with automatic
69    Phi node construction. */
71 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block's own block pointer is NULL; blocks are not nested in blocks. */
75   res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are given up front, so the block is immediately mature. */
76   set_Block_matured(res, 1);
77   set_Block_block_visited(res, 0);
79   /* res->attr.block.exc = exc_normal; */
80   /* res->attr.block.handler_entry = 0; */
81   res->attr.block.dead     = 0;
82   res->attr.block.irg      = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
83   res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view fields start out unset. */
84   res->attr.block.in_cg = NULL;
85   res->attr.block.cg_backedge = NULL;
87   IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T result tuple) in the given block. */
92 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
96   res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
97   /* res->attr.start.irg = irg; */
99   IRN_VRFY_IRG(res, irg);
/* Constructs an End node.  Arity -1 presumably requests a dynamic
   predecessor array so keep-alive edges can be appended later
   (see add_End_keepalive use below) -- TODO confirm. */
104 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
108   res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
110   IRN_VRFY_IRG(res, irg);
114 /* Creates a Phi node with all predecessors.  Calling this constructor
115    is only allowed if the corresponding block is mature.  */
117 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
121   bool has_unknown = false;
123   /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is mature, its control-flow arity must match the Phi arity. */
125   if ( get_Block_matured(block) )
126     assert( get_irn_arity(block) == arity );
128   res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
130   res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan for Unknown predecessors; their presence suppresses optimization. */
132   for (i = arity-1; i >= 0; i--)
133     if (get_irn_op(in[i]) == op_Unknown) {
/* Do not optimize a Phi that still has Unknown inputs: the optimizer could
   fold it away before the real values are filled in. */
138   if (!has_unknown) res = optimize_node (res);
139   IRN_VRFY_IRG(res, irg);
141   /* Memory Phis in endless loops must be kept alive.
142      As we can't distinguish these easily we keep all of them alive. */
143   if ((res->op == op_Phi) && (mode == mode_M))
144     add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval 'con' with explicit type 'tp'.
   Note: the node is placed in the graph's start block; the 'block'
   parameter is ignored here (constants float to the start block). */
149 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
153   res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
154   res->attr.con.tv = con;
155   set_Const_type(res, tp);  /* Call method because of complex assertion. */
156   res = optimize_node (res);
/* optimize_node may CSE to an existing Const; its type must still agree. */
157   assert(get_Const_type(res) == tp);
158   IRN_VRFY_IRG(res, irg);
/* Convenience variant: Const with the unknown type. */
164 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
166   type *tp = firm_unknown_type;
167   /* removing this somehow causes errors in jack. */
168   return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node: a transparent copy of 'val' (usually optimized away). */
172 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
176   res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
177   res = optimize_node(res);
178   IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting result 'proj' from the tuple 'arg'. */
183 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
188   res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
189   res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
192   assert(get_Proj_pred(res));
193   assert(get_nodes_block(get_Proj_pred(res)));
195   res = optimize_node(res);
197   IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond and marks the Cond as fragmentary
   with 'max_proj' as its default projection number. */
203 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
207   assert(arg->op == op_Cond);
208   arg->attr.c.kind = fragmentary;
209   arg->attr.c.default_proj = max_proj;
210   res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts 'op' to mode 'mode'. */
215 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
219   res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
220   res = optimize_node(res);
221   IRN_VRFY_IRG(res, irg);
/* Constructs a Cast node: reinterprets 'op' as type 'to_tp'.
   The mode is unchanged -- a Cast changes only the type annotation. */
226 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
230   assert(is_atomic_type(to_tp));
232   res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
233   res->attr.cast.totype = to_tp;
234   res = optimize_node(res);
235   IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple node grouping 'arity' values into one mode_T node. */
240 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
244   res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
245   res = optimize_node (res);
246   IRN_VRFY_IRG(res, irg);
/* ---------------------------------------------------------------------
   Arithmetic / logic / shift / compare constructors.  All follow the
   same pattern: build the node, run the local optimizer (constant
   folding, CSE, algebraic simplification), verify, return.
   The division-family ops (Quot, DivMod, Div, Mod) take a memory
   operand and produce a mode_T tuple (they may raise an exception),
   Cmp also produces mode_T (one Proj per relation).
   --------------------------------------------------------------------- */
251 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
252        ir_node *op1, ir_node *op2, ir_mode *mode)
259   res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
260   res = optimize_node(res);
261   IRN_VRFY_IRG(res, irg);
266 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
267        ir_node *op1, ir_node *op2, ir_mode *mode)
274   res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
275   res = optimize_node (res);
276   IRN_VRFY_IRG(res, irg);
281 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
282        ir_node *op, ir_mode *mode)
286   res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
287   res = optimize_node(res);
288   IRN_VRFY_IRG(res, irg);
293 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
294        ir_node *op1, ir_node *op2, ir_mode *mode)
301   res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
302   res = optimize_node(res);
303   IRN_VRFY_IRG(res, irg);
308 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
309        ir_node *memop, ir_node *op1, ir_node *op2)
317   res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
318   res = optimize_node(res);
319   IRN_VRFY_IRG(res, irg);
324 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
325        ir_node *memop, ir_node *op1, ir_node *op2)
333   res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
334   res = optimize_node(res);
335   IRN_VRFY_IRG(res, irg);
340 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
341        ir_node *memop, ir_node *op1, ir_node *op2)
349   res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
350   res = optimize_node(res);
351   IRN_VRFY_IRG(res, irg);
356 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
357        ir_node *memop, ir_node *op1, ir_node *op2)
365   res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
366   res = optimize_node(res);
367   IRN_VRFY_IRG(res, irg);
372 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
373        ir_node *op1, ir_node *op2, ir_mode *mode)
380   res = new_ir_node(db, irg, block, op_And, mode, 2, in);
381   res = optimize_node(res);
382   IRN_VRFY_IRG(res, irg);
387 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
388       ir_node *op1, ir_node *op2, ir_mode *mode)
395   res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
396   res = optimize_node(res);
397   IRN_VRFY_IRG(res, irg);
402 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
403        ir_node *op1, ir_node *op2, ir_mode *mode)
410   res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
411   res = optimize_node (res);
412   IRN_VRFY_IRG(res, irg);
417 new_rd_Not    (dbg_info* db, ir_graph *irg, ir_node *block,
418        ir_node *op, ir_mode *mode)
422   res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
423   res = optimize_node(res);
424   IRN_VRFY_IRG(res, irg);
429 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
430        ir_node *op, ir_node *k, ir_mode *mode)
437   res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
438   res = optimize_node(res);
439   IRN_VRFY_IRG(res, irg);
444 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
445        ir_node *op, ir_node *k, ir_mode *mode)
452   res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
453   res = optimize_node(res);
454   IRN_VRFY_IRG(res, irg);
459 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
460        ir_node *op, ir_node *k, ir_mode *mode)
467   res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
468   res = optimize_node(res);
469   IRN_VRFY_IRG(res, irg);
474 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
475        ir_node *op, ir_node *k, ir_mode *mode)
482   res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
483   res = optimize_node(res);
484   IRN_VRFY_IRG(res, irg);
489 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
490        ir_node *op, ir_mode *mode)
494   res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
495   res = optimize_node (res);
496   IRN_VRFY_IRG(res, irg);
501 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
502        ir_node *op1, ir_node *op2)
509   res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
510   res = optimize_node(res);
511   IRN_VRFY_IRG(res, irg);
/* Constructs a Jmp node: unconditional control flow out of 'block'. */
516 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
520   res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
521   res = optimize_node (res);
522   IRN_VRFY_IRG (res, irg);
/* Constructs a Cond node: conditional branch on selector 'c'.
   Starts out as a 'dense' switch with default projection 0; the
   default-Proj constructor above may later mark it fragmentary. */
527 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
531   res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
532   res->attr.c.kind         = dense;
533   res->attr.c.default_proj = 0;
534   res = optimize_node (res);
535   IRN_VRFY_IRG(res, irg);
/* Constructs a Call node.  The real in-array is [store, callee, args...],
   hence r_arity = arity + 2 (built with alloca via NEW_ARR_A). */
540 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
541         ir_node *callee, int arity, ir_node **in, type *tp)
548   NEW_ARR_A(ir_node *, r_in, r_arity);
/* Copy the actual arguments behind the two fixed predecessors. */
551   memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
553   res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type). */
555   assert((get_unknown_type() == tp) || is_method_type(tp));
556   set_Call_type(res, tp);
557   res->attr.call.exc.pin_state = op_pin_state_pinned;
558   res->attr.call.callee_arr    = NULL;
559   res = optimize_node(res);
560   IRN_VRFY_IRG(res, irg);
/* Constructs a Return node.  In-array is [store, results...]. */
565 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
566           ir_node *store, int arity, ir_node **in)
573   NEW_ARR_A (ir_node *, r_in, r_arity);
575   memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
576   res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
577   res = optimize_node(res);
578   IRN_VRFY_IRG(res, irg);
/* Constructs a Raise node: raises exception object 'obj' through 'store'. */
583 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
590   res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
591   res = optimize_node(res);
592   IRN_VRFY_IRG(res, irg);
/* Constructs a Load node: loads a value of mode 'mode' from 'adr'.
   Produces a mode_T tuple (memory, result, exception control flow). */
597 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
598         ir_node *store, ir_node *adr, ir_mode *mode)
605   res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
606   res->attr.load.exc.pin_state = op_pin_state_pinned;
607   res->attr.load.load_mode     = mode;
/* Default to non-volatile; frontends can change this afterwards. */
608   res->attr.load.volatility    = volatility_non_volatile;
609   res = optimize_node(res);
610   IRN_VRFY_IRG(res, irg);
/* Constructs a Store node: stores 'val' to 'adr'. */
615 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
616          ir_node *store, ir_node *adr, ir_node *val)
624   res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
625   res->attr.store.exc.pin_state = op_pin_state_pinned;
626   res->attr.store.volatility    = volatility_non_volatile;
627   res = optimize_node(res);
628   IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc node: allocates 'size' bytes of 'alloc_type',
   on the stack or heap according to 'where'. */
633 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
634          ir_node *size, type *alloc_type, where_alloc where)
641   res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
642   res->attr.a.exc.pin_state = op_pin_state_pinned;
643   res->attr.a.where         = where;
644   res->attr.a.type          = alloc_type;
645   res = optimize_node(res);
646   IRN_VRFY_IRG(res, irg);
/* Constructs a Free node: frees 'size' bytes at 'ptr' of type 'free_type'. */
651 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
652         ir_node *ptr, ir_node *size, type *free_type)
660   res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
661   res->attr.f = free_type;
662   res = optimize_node(res);
663   IRN_VRFY_IRG(res, irg);
/* Constructs a Sel node: selects entity 'ent' relative to 'objptr' with
   'arity' index operands.  In-array is [store, objptr, indices...]. */
668 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
669        int arity, ir_node **in, entity *ent)
675   assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
678   NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
681   memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
/* Sel produces a machine-pointer-mode address. */
682   res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
683   res->attr.s.ent = ent;
684   res = optimize_node(res);
685   IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (runtime type test) node.
   NOTE(review): the node is built with op_Sel, not a dedicated InstOf
   opcode, and is not optimized -- looks intentional-but-odd; confirm
   against the opcode definitions before changing. */
690 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
691        ir_node *objptr, type *ent)
698   NEW_ARR_A(ir_node *, r_in, r_arity);
702   res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
703   res->attr.io.ent = ent;
705   /* res = optimize(res); */
706   IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst node of kind 'symkind' with symbol 'value' and
   type 'tp'.  Address kinds get pointer mode; others an integer mode. */
711 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
712               symconst_kind symkind, type *tp) {
716   if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
721   res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
723   res->attr.i.num = symkind;
724   res->attr.i.sym = value;
727   res = optimize_node(res);
728   IRN_VRFY_IRG(res, irg);
/* Convenience variant: SymConst with the unknown type. */
733 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
734          symconst_kind symkind)
736   ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* The following helpers build the symconst_symbol union in place and put
   the node in the start block.  The (type *) casts fill the union's first
   member regardless of the actual symbol kind -- relies on union layout;
   verify against the symconst_symbol declaration. */
740 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
741   symconst_symbol sym = {(type *)symbol};
742   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
745 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
746   symconst_symbol sym = {(type *)symbol};
747   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
750 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
751   symconst_symbol sym = {symbol};
752   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
755 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
756   symconst_symbol sym = {symbol};
757   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Constructs a Sync node: merges 'arity' memory values into one. */
761 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
765   res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
766   res = optimize_node(res);
767   IRN_VRFY_IRG(res, irg);
/* Returns the (unique) Bad node of the graph. */
772 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm node: asserts that 'val' cmp 'bound' holds
   (value-range information for later analyses). */
778 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
780   ir_node *in[2], *res;
784   res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
785   res->attr.confirm_cmp = cmp;
786   res = optimize_node (res);
787   IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown node of mode 'm' in the start block.
   Not optimized or verified -- it represents an undefined value. */
792 new_rd_Unknown (ir_graph *irg, ir_mode *m)
794   return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin node (interprocedural view): marks the control
   flow leaving a block through 'call'. */
798 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
803   in[0] = get_Call_ptr(call);
804   res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
805   /* res->attr.callbegin.irg = irg; */
806   res->attr.callbegin.call = call;
807   res = optimize_node(res);
808   IRN_VRFY_IRG(res, irg);
/* Constructs an EndReg node (interprocedural regular end). */
813 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
817   res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
819   IRN_VRFY_IRG(res, irg);
/* Constructs an EndExcept node (interprocedural exceptional end) and
   registers it with the graph. */
824 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
828   res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
829   irg->end_except = res;
830   IRN_VRFY_IRG (res, irg);
/* Constructs a Break node: control flow leaving an interprocedural block. */
835 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
839   res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
840   res = optimize_node(res);
841   IRN_VRFY_IRG(res, irg);
/* Constructs a Filter node: the interprocedural counterpart of Proj. */
846 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
851   res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
852   res->attr.filter.proj = proj;
853   res->attr.filter.in_cg = NULL;
854   res->attr.filter.backedge = NULL;
857   assert(get_Proj_pred(res));
858   assert(get_nodes_block(get_Proj_pred(res)));
860   res = optimize_node(res);
861   IRN_VRFY_IRG(res, irg);
/* Returns the (unique) NoMem node of the graph. */
866 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux node: sel ? ir_true : ir_false. */
871 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
872     ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
881   res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
884   res = optimize_node(res);
885   IRN_VRFY_IRG(res, irg);
/* ---------------------------------------------------------------------
   Convenience wrappers: each new_r_* constructor simply calls the
   corresponding new_rd_* constructor with NULL debug info.
   --------------------------------------------------------------------- */
890 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
891   return new_rd_Block(NULL, irg, arity, in);
893 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
894   return new_rd_Start(NULL, irg, block);
896 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
897   return new_rd_End(NULL, irg, block);
899 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
900   return new_rd_Jmp(NULL, irg, block);
902 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
903   return new_rd_Cond(NULL, irg, block, c);
905 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
906                ir_node *store, int arity, ir_node **in) {
907   return new_rd_Return(NULL, irg, block, store, arity, in);
909 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
910               ir_node *store, ir_node *obj) {
911   return new_rd_Raise(NULL, irg, block, store, obj);
913 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
914               ir_mode *mode, tarval *con) {
915   return new_rd_Const(NULL, irg, block, mode, con);
917 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
918                symconst_symbol value, symconst_kind symkind) {
919   return new_rd_SymConst(NULL, irg, block, value, symkind);
921 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
922                ir_node *objptr, int n_index, ir_node **index,
924   return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
926 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
928   return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
930 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
931                ir_node *callee, int arity, ir_node **in,
933   return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
935 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
936                ir_node *op1, ir_node *op2, ir_mode *mode) {
937   return new_rd_Add(NULL, irg, block, op1, op2, mode);
939 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
940                ir_node *op1, ir_node *op2, ir_mode *mode) {
941   return new_rd_Sub(NULL, irg, block, op1, op2, mode);
943 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
944               ir_node *op, ir_mode *mode) {
945   return new_rd_Minus(NULL, irg, block, op, mode);
947 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
948                ir_node *op1, ir_node *op2, ir_mode *mode) {
949   return new_rd_Mul(NULL, irg, block, op1, op2, mode);
951 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
952                ir_node *memop, ir_node *op1, ir_node *op2) {
953   return new_rd_Quot(NULL, irg, block, memop, op1, op2);
955 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
956                ir_node *memop, ir_node *op1, ir_node *op2) {
957   return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
959 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
960                ir_node *memop, ir_node *op1, ir_node *op2) {
961   return new_rd_Div(NULL, irg, block, memop, op1, op2);
963 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
964                ir_node *memop, ir_node *op1, ir_node *op2) {
965   return new_rd_Mod(NULL, irg, block, memop, op1, op2);
967 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
968                ir_node *op, ir_mode *mode) {
969   return new_rd_Abs(NULL, irg, block, op, mode);
971 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
972                ir_node *op1, ir_node *op2, ir_mode *mode) {
973   return new_rd_And(NULL, irg, block, op1, op2, mode);
975 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
976               ir_node *op1, ir_node *op2, ir_mode *mode) {
977   return new_rd_Or(NULL, irg, block, op1, op2, mode);
979 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
980                ir_node *op1, ir_node *op2, ir_mode *mode) {
981   return new_rd_Eor(NULL, irg, block, op1, op2, mode);
983 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
984                ir_node *op, ir_mode *mode) {
985   return new_rd_Not(NULL, irg, block, op, mode);
987 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
988                ir_node *op1, ir_node *op2) {
989   return new_rd_Cmp(NULL, irg, block, op1, op2);
991 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
992                ir_node *op, ir_node *k, ir_mode *mode) {
993   return new_rd_Shl(NULL, irg, block, op, k, mode);
995 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
996                ir_node *op, ir_node *k, ir_mode *mode) {
997   return new_rd_Shr(NULL, irg, block, op, k, mode);
999 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1000                ir_node *op, ir_node *k, ir_mode *mode) {
1001   return new_rd_Shrs(NULL, irg, block, op, k, mode);
1003 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1004                ir_node *op, ir_node *k, ir_mode *mode) {
1005   return new_rd_Rot(NULL, irg, block, op, k, mode);
1007 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1008                ir_node *op, ir_mode *mode) {
1009   return new_rd_Conv(NULL, irg, block, op, mode);
1011 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1012   return new_rd_Cast(NULL, irg, block, op, to_tp);
1014 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1015                ir_node **in, ir_mode *mode) {
1016   return new_rd_Phi(NULL, irg, block, arity, in, mode);
1018 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1019                ir_node *store, ir_node *adr, ir_mode *mode) {
1020   return new_rd_Load(NULL, irg, block, store, adr, mode);
1022 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1023               ir_node *store, ir_node *adr, ir_node *val) {
1024   return new_rd_Store(NULL, irg, block, store, adr, val);
1026 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1027               ir_node *size, type *alloc_type, where_alloc where) {
1028   return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1030 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1031                ir_node *ptr, ir_node *size, type *free_type) {
1032   return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1034 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1035   return new_rd_Sync(NULL, irg, block, arity, in);
1037 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1038                ir_mode *mode, long proj) {
1039   return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1041 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1043   return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1045 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1046               int arity, ir_node **in) {
1047   return new_rd_Tuple(NULL, irg, block, arity, in );
1049 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1050               ir_node *val, ir_mode *mode) {
1051   return new_rd_Id(NULL, irg, block, val, mode);
1053 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1054   return new_rd_Bad(irg);
1056 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1057   return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1059 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1060   return new_rd_Unknown(irg, m);
1062 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1063   return new_rd_CallBegin(NULL, irg, block, callee);
1065 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1066   return new_rd_EndReg(NULL, irg, block);
1068 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1069   return new_rd_EndExcept(NULL, irg, block);
1071 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1072   return new_rd_Break(NULL, irg, block);
1074 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1075                ir_mode *mode, long proj) {
1076   return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1078 INLINE ir_node *new_r_NoMem (ir_graph *irg) {
1079   return new_rd_NoMem(irg);
1081 INLINE ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1082     ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1083   return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1087 /** ********************/
1088 /** public interfaces */
1089 /** construction tools */
1093 * - create a new Start node in the current block
1095 * @return s - pointer to the created Start node
/* Constructs a Start node in the current block of current_ir_graph
   (debug-info variant of the "current graph" interface). */
1100 new_d_Start (dbg_info* db)
1104   res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1105              op_Start, mode_T, 0, NULL);
1106   /* res->attr.start.irg = current_ir_graph; */
1108   res = optimize_node(res);
1109   IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node in the current block of current_ir_graph. */
1114 new_d_End (dbg_info* db)
1117   res = new_ir_node(db, current_ir_graph,  current_ir_graph->current_block,
1118             op_End, mode_X, -1, NULL);
1119   res = optimize_node(res);
1120   IRN_VRFY_IRG(res, current_ir_graph);
1125 /* Constructs a Block with a fixed number of predecessors.
1126    Does set current_block.  Can be used with automatic Phi
1127    node construction. */
1129 new_d_Block (dbg_info* db, int arity, ir_node **in)
1133   bool has_unknown = false;
1135   res = new_rd_Block(db, current_ir_graph, arity, in);
1137   /* Create and initialize array for Phi-node construction. */
/* graph_arr maps each local variable slot (0 .. n_loc-1) to its current
   value in this block; NULL means "not yet defined here". */
1138   if (get_irg_phase_state(current_ir_graph) == phase_building) {
1139     res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1140                       current_ir_graph->n_loc);
1141     memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Same rule as in new_rd_Phi: Unknown predecessors suppress optimization. */
1144   for (i = arity-1; i >= 0; i--)
1145     if (get_irn_op(in[i]) == op_Unknown) {
1150   if (!has_unknown) res = optimize_node(res);
/* This constructor, unlike new_rd_Block, makes the new block current. */
1151   current_ir_graph->current_block = res;
1153   IRN_VRFY_IRG(res, current_ir_graph);
1158 /* ***********************************************************************/
1159 /* Methods necessary for automatic Phi node creation */
1161 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1162 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1163 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1164 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1166 Call Graph: ( A ---> B == A "calls" B)
1168 get_value mature_immBlock
1176 get_r_value_internal |
1180 new_rd_Phi0 new_rd_Phi_in
1182 * *************************************************************************** */
1184 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi used during construction in immature blocks; its real
   predecessors are filled in when the block matures.  Not optimized. */
1185 static INLINE ir_node *
1186 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1190   res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1191   IRN_VRFY_IRG(res, irg);
1195 /* There are two implementations of the Phi node construction. The first
1196 is faster, but does not work for blocks with more than 2 predecessors.
1197 The second works always but is slower and causes more unnecessary Phi
1199 Select the implementations by the following preprocessor flag set in
1201 #if USE_FAST_PHI_CONSTRUCTION
1203 /* This is a stack used for allocating and deallocating nodes in
1204 new_rd_Phi_in. The original implementation used the obstack
1205 to model this stack, now it is explicit. This reduces side effects.
1207 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi-in stack (malloc'd head, flexible array
   for the node slots). */
1208 INLINE Phi_in_stack *
1209 new_Phi_in_stack(void) {
1212   res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1214   res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's node array (and, presumably, the head struct in
   the elided lines -- confirm in the full source). */
1221 free_Phi_in_stack(Phi_in_stack *s) {
1222   DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the current graph's Phi-in stack so
   alloc_or_pop_from_Phi_in_stack() can recycle it later.  Grows the
   array only when pos has reached its current length. */
1226 free_to_Phi_in_stack(ir_node *phi) {
1227   if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1228       current_ir_graph->Phi_in_stack->pos)
1229     ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1231     current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1233   (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node with the given arity/mode: either a brand-new node
   (stack empty) or a recycled node popped from the Phi-in stack, whose
   in-array is re-allocated on the obstack and re-initialized. */
1236 static INLINE ir_node *
1237 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1238          int arity, ir_node **in) {
1240   ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1241   int pos = current_ir_graph->Phi_in_stack->pos;
1245     /* We need to allocate a new node */
/* NOTE(review): 'db' is referenced here but is not a parameter of this
   function -- presumably NULL / debug info supplied in the elided lines
   of the full source; confirm before modifying. */
1246     res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1247     res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1249     /* reuse the old node and initialize it again. */
1252     assert (res->kind == k_ir_node);
1253     assert (res->op == op_Phi);
1257     assert (arity >= 0);
1258     /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Slot 0 of the in-array holds the block; operands start at index 1. */
1259     res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1261     memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1263     (current_ir_graph->Phi_in_stack->pos)--;
1267 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1269 /* Creates a Phi node with a given, fixed array **in of predecessors.
1270 If the Phi node is unnecessary, as the same value reaches the block
1271 through all control flow paths, it is eliminated and the value
1272 returned directly. This constructor is only intended for use in
1273 the automatic Phi node generation triggered by get_value or mature.
1274 The implementation is quite tricky and depends on the fact, that
1275 the nodes are allocated on a stack:
1276 The in array contains predecessors and NULLs. The NULLs appear,
1277 if get_r_value_internal, that computed the predecessors, reached
1278 the same block on two paths. In this case the same value reaches
1279 this block on both paths, there is no definition in between. We need
1280 not allocate a Phi where these path's merge, but we have to communicate
1281 this fact to the caller. This happens by returning a pointer to the
1282 node the caller _will_ allocate. (Yes, we predict the address. We can
1283 do so because the nodes are allocated on the obstack.) The caller then
1284 finds a pointer to itself and, when this routine is called again,
1287 static INLINE ir_node *
1288 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1291   ir_node *res, *known;
1293   /* Allocate a new node on the obstack.  This can return a node to
1294      which some of the pointers in the in-array already point.
1295      Attention: the constructor copies the in array, i.e., the later
1296      changes to the array in this routine do not affect the
1297      constructed node!  If the in array contains NULLs, there will be
1298      missing predecessors in the returned node.  Is this a possible
1299      internal state of the Phi node generation? */
1300 #if USE_EXPLICIT_PHI_IN_STACK
1301   res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1303   res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1304   res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1307   /* The in-array can contain NULLs.  These were returned by
1308      get_r_value_internal if it reached the same block/definition on a
1309      second path.  The NULLs are replaced by the node itself to
1310      simplify the test in the next loop. */
1311   for (i = 0;  i < ins;  ++i) {
1316   /* This loop checks whether the Phi has more than one predecessor.
1317      If so, it is a real Phi node and we break the loop.  Else the Phi
1318      node merges the same definition on several paths and therefore is
/* Predecessors equal to res (self-reference / replaced NULL) or to the
   single known value do not count as distinct inputs. */
1320   for (i = 0;  i < ins;  ++i)
1322     if (in[i] == res || in[i] == known) continue;
1330   /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Degenerate Phi: recycle the just-allocated node (stack or obstack). */
1332 #if USE_EXPLICIT_PHI_IN_STACK
1333     free_to_Phi_in_stack(res);
1335     obstack_free (current_ir_graph->obst, res);
1339     res = optimize_node (res);
1340     IRN_VRFY_IRG(res, irg);
1343   /* return the pointer to the Phi node.  This node might be deallocated! */
1348 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1351 allocates and returns this node. The routine called to allocate the
1352 node might optimize it away and return a real value, or even a pointer
1353 to a deallocated Phi node on top of the obstack!
1354 This function is called with an in-array of proper size. **/
/* Collect one Phi operand per control-flow predecessor of `block` into `nin`
   (by recursive get_r_value_internal), then construct the Phi via
   new_rd_Phi_in and cache the result in graph_arr[pos] if still unset. */
1356 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1358 ir_node *prevBlock, *res;
1361 /* This loop goes to all predecessor blocks of the block the Phi node is in
1362 and there finds the operands of the Phi node by calling
1363 get_r_value_internal. */
1364 for (i = 1; i <= ins; ++i) {
1365 assert (block->in[i]);
1366 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1368 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1371 /* After collecting all predecessors into the array nin a new Phi node
1372 with these predecessors is created. This constructor contains an
1373 optimization: If all predecessors of the Phi node are identical it
1374 returns the only operand instead of a new Phi node. If the value
1375 passes two different control flow edges without being defined, and
1376 this is the second path treated, a pointer to the node that will be
1377 allocated for the first path (recursion) is returned. We already
1378 know the address of this node, as it is the next node to be allocated
1379 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1380 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1382 /* Now we know the value for "pos" and can enter it in the array with
1383 all known local variables. Attention: this might be a pointer to
1384 a node, that later will be allocated!!! See new_rd_Phi_in.
1385 If this is called in mature, after some set_value in the same block,
1386 the proper value must not be overwritten:
1388 get_value (makes Phi0, put's it into graph_arr)
1389 set_value (overwrites Phi0 in graph_arr)
1390 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1393 if (!block->attr.block.graph_arr[pos]) {
1394 block->attr.block.graph_arr[pos] = res;
1396 /* printf(" value already computed by %s\n",
1397 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1403 /* This function returns the last definition of a variable. In case
1404 this variable was last defined in a previous block, Phi nodes are
1405 inserted. If the part of the firm graph containing the definition
1406 is not yet constructed, a dummy Phi node is returned. */
/* Return the reaching definition of local variable `pos` in `block`,
   creating Phi/Phi0 nodes as needed (slow variant, no Phi0 in case 4). */
1408 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1411 /* There are 4 cases to treat.
1413 1. The block is not mature and we visit it the first time. We can not
1414 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1415 predecessors is returned. This node is added to the linked list (field
1416 "link") of the containing block to be completed when this block is
1417 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1420 2. The value is already known in this block, graph_arr[pos] is set and we
1421 visit the block the first time. We can return the value without
1422 creating any new nodes.
1424 3. The block is mature and we visit it the first time. A Phi node needs
1425 to be created (phi_merge). If the Phi is not needed, as all it's
1426 operands are the same value reaching the block through different
1427 paths, it's optimized away and the value itself is returned.
1429 4. The block is mature, and we visit it the second time. Now two
1430 subcases are possible:
1431 * The value was computed completely the last time we were here. This
1432 is the case if there is no loop. We can return the proper value.
1433 * The recursion that visited this node and set the flag did not
1434 return yet. We are computing a value in a loop and need to
1435 break the recursion without knowing the result yet.
1436 @@@ strange case. Straight forward we would create a Phi before
1437 starting the computation of it's predecessors. In this case we will
1438 find a Phi here in any case. The problem is that this implementation
1439 only creates a Phi after computing the predecessors, so that it is
1440 hard to compute self references of this Phi. @@@
1441 There is no simple check for the second subcase. Therefore we check
1442 for a second visit and treat all such cases as the second subcase.
1443 Anyways, the basic situation is the same: we reached a block
1444 on two paths without finding a definition of the value: No Phi
1445 nodes are needed on both paths.
1446 We return this information "Two paths, no Phi needed" by a very tricky
1447 implementation that relies on the fact that an obstack is a stack and
1448 will return a node with the same address on different allocations.
1449 Look also at phi_merge and new_rd_phi_in to understand this.
1450 @@@ Unfortunately this does not work, see testprogram
1451 three_cfpred_example.
1455 /* case 4 -- already visited. */
1456 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1458 /* visited the first time */
1459 set_irn_visited(block, get_irg_visited(current_ir_graph));
1461 /* Get the local valid value */
1462 res = block->attr.block.graph_arr[pos];
1464 /* case 2 -- If the value is actually computed, return it. */
1465 if (res) return res;
1467 if (block->attr.block.matured) { /* case 3 */
1469 /* The Phi has the same amount of ins as the corresponding block. */
1470 int ins = get_irn_arity(block);
1472 NEW_ARR_A (ir_node *, nin, ins);
1474 /* Phi merge collects the predecessors and then creates a node. */
1475 res = phi_merge (block, pos, mode, nin, ins);
1477 } else { /* case 1 */
1478 /* The block is not mature, we don't know how many in's are needed. A Phi
1479 with zero predecessors is created. Such a Phi node is called Phi0
1480 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1481 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1483 The Phi0 has to remember the pos of it's internal value. If the real
1484 Phi is computed, pos is used to update the array with the local
1487 res = new_rd_Phi0 (current_ir_graph, block, mode);
1488 res->attr.phi0_pos = pos;
1489 res->link = block->link;
1493 /* If we get here, the frontend missed a use-before-definition error */
1496 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1497 assert (mode->code >= irm_F && mode->code <= irm_P);
1498 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1499 tarval_mode_null[mode->code]);
1502 /* The local valid value is available now. */
1503 block->attr.block.graph_arr[pos] = res;
1511 it starts the recursion. This causes an Id at the entry of
1512 every block that has no definition of the value! **/
1514 #if USE_EXPLICIT_PHI_IN_STACK
1516 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1517 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Phi constructor for the default (non-fast) SSA construction.
   `phi0` is the placeholder Phi0 this Phi will replace (or NULL);
   self-references through phi0 are redirected to the new node. */
1520 static INLINE ir_node *
1521 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1522 ir_node **in, int ins, ir_node *phi0)
1525 ir_node *res, *known;
1527 /* Allocate a new node on the obstack. The allocation copies the in
1529 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1530 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1532 /* This loop checks whether the Phi has more than one predecessor.
1533 If so, it is a real Phi node and we break the loop. Else the
1534 Phi node merges the same definition on several paths and therefore
1535 is not needed. Don't consider Bad nodes! */
1537 for (i=0; i < ins; ++i)
1541 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1543 /* Optimize self referencing Phis: We can't detect them yet properly, as
1544 they still refer to the Phi0 they will replace. So replace right now. */
1545 if (phi0 && in[i] == phi0) in[i] = res;
1547 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1555 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1558 obstack_free (current_ir_graph->obst, res);
1559 if (is_Phi(known)) {
1560 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1561 order, an enclosing Phi node may get superfluous. */
1562 res = optimize_in_place_2(known);
1563 if (res != known) { exchange(known, res); }
1568 /* A undefined value, e.g., in unreachable code. */
1572 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1573 IRN_VRFY_IRG(res, irg);
1574 /* Memory Phis in endless loops must be kept alive.
1575 As we can't distinguish these easily we keep all of them alive. */
1576 if ((res->op == op_Phi) && (mode == mode_M))
1577 add_End_keepalive(irg->end, res);
1584 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1586 #if PRECISE_EXC_CONTEXT
1588 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1590 /* Construct a new frag_array for node n.
1591 Copy the content from the current graph_arr of the corresponding block:
1592 this is the current state.
1593 Set ProjM(n) as current memory state.
1594 Further the last entry in frag_arr of current block points to n. This
1595 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1597 static INLINE ir_node ** new_frag_arr (ir_node *n)
1602 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1603 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1604 sizeof(ir_node *)*current_ir_graph->n_loc);
1606 /* turn off optimization before allocating Proj nodes, as res isn't
1608 opt = get_opt_optimize(); set_optimize(0);
1609 /* Here we rely on the fact that all frag ops have Memory as first result! */
1610 if (get_irn_op(n) == op_Call)
1611 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* Non-Call fragile ops: the assert below pins down that all their memory
   Proj numbers coincide, so a single Proj number works for every opcode. */
1613 assert((pn_Quot_M == pn_DivMod_M) &&
1614 (pn_Quot_M == pn_Div_M) &&
1615 (pn_Quot_M == pn_Mod_M) &&
1616 (pn_Quot_M == pn_Load_M) &&
1617 (pn_Quot_M == pn_Store_M) &&
1618 (pn_Quot_M == pn_Alloc_M) );
1619 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Link this frag op into the block's chain via the reserved last slot. */
1623 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1628 * returns the frag_arr from a node
/* The frag_arr lives in a different attribute union member per opcode;
   the (elided) case labels select the matching member below. */
1630 static INLINE ir_node **
1631 get_frag_arr (ir_node *n) {
1632 switch (get_irn_opcode(n)) {
1634 return n->attr.call.exc.frag_arr;
1636 return n->attr.a.exc.frag_arr;
1638 return n->attr.load.exc.frag_arr;
1640 return n->attr.store.exc.frag_arr;
1642 return n->attr.except.frag_arr;
/* Record `val` for variable `pos` in `frag_arr` and in every later frag
   array chained through the reserved last slot, without overwriting
   entries that are already set.
   NOTE(review): both a recursive and a loop-based (bounded to 1000 chain
   links) variant appear below — confirm which one is active in the build. */
1647 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1649 if (!frag_arr[pos]) frag_arr[pos] = val;
1650 if (frag_arr[current_ir_graph->n_loc - 1]) {
1651 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1652 assert(arr != frag_arr && "Endless recursion detected");
1653 set_frag_value(arr, pos, val);
1658 for (i = 0; i < 1000; ++i) {
1659 if (!frag_arr[pos]) {
1660 frag_arr[pos] = val;
1662 if (frag_arr[current_ir_graph->n_loc - 1]) {
1663 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
/* Chain longer than the loop bound: treat as a cycle and abort. */
1669 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but reads the definition that is valid
   immediately after the fragile op `cfOp` (precise exception context). */
1674 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1678 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1680 frag_arr = get_frag_arr(cfOp);
1681 res = frag_arr[pos];
1683 if (block->attr.block.graph_arr[pos]) {
1684 /* There was a set_value after the cfOp and no get_value before that
1685 set_value. We must build a Phi node now. */
1686 if (block->attr.block.matured) {
1687 int ins = get_irn_arity(block);
1689 NEW_ARR_A (ir_node *, nin, ins);
1690 res = phi_merge(block, pos, mode, nin, ins);
/* Block not yet matured: fall back to a Phi0 placeholder linked into
   the block's Phi0 list, as in get_r_value_internal case 1. */
1692 res = new_rd_Phi0 (current_ir_graph, block, mode);
1693 res->attr.phi0_pos = pos;
1694 res->link = block->link;
1698 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1699 but this should be better: (remove comment if this works) */
1700 /* It's a Phi, we can write this into all graph_arrs with NULL */
1701 set_frag_value(block->attr.block.graph_arr, pos, res);
1703 res = get_r_value_internal(block, pos, mode);
1704 set_frag_value(block->attr.block.graph_arr, pos, res);
1712 computes the predecessors for the real phi node, and then
1713 allocates and returns this node. The routine called to allocate the
1714 node might optimize it away and return a real value.
1715 This function must be called with an in-array of proper size. **/
/* Default-variant phi_merge: plants a Phi0 placeholder first to break
   recursion, collects one operand per predecessor (honouring precise
   exception context and Bad predecessors), builds the Phi, and finally
   exchanges the Phi0 for the real node. */
1717 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1719 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1722 /* If this block has no value at pos create a Phi0 and remember it
1723 in graph_arr to break recursions.
1724 Else we may not set graph_arr as there a later value is remembered. */
1726 if (!block->attr.block.graph_arr[pos]) {
1727 if (block == get_irg_start_block(current_ir_graph)) {
1728 /* Collapsing to Bad tarvals is no good idea.
1729 So we call a user-supplied routine here that deals with this case as
1730 appropriate for the given language. Sadly, the only help we can give
1731 here is the position.
1733 Even if all variables are defined before use, it can happen that
1734 we get to the start block, if a cond has been replaced by a tuple
1735 (bad, jmp). In this case we call the function needlessly, eventually
1736 generating a non-existent error.
1737 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1740 if (default_initialize_local_variable)
1741 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1743 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1744 /* We don't need to care about exception ops in the start block.
1745 There are none by definition. */
1746 return block->attr.block.graph_arr[pos];
1748 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1749 block->attr.block.graph_arr[pos] = phi0;
1750 #if PRECISE_EXC_CONTEXT
1751 if (get_opt_precise_exc_context()) {
1752 /* Set graph_arr for fragile ops. Also here we should break recursion.
1753 We could choose a cyclic path through an cfop. But the recursion would
1754 break at some point. */
1755 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1761 /* This loop goes to all predecessor blocks of the block the Phi node
1762 is in and there finds the operands of the Phi node by calling
1763 get_r_value_internal. */
1764 for (i = 1; i <= ins; ++i) {
1765 prevCfOp = skip_Proj(block->in[i]);
1767 if (is_Bad(prevCfOp)) {
1768 /* In case a Cond has been optimized we would get right to the start block
1769 with an invalid definition. */
1770 nin[i-1] = new_Bad();
1773 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1775 if (!is_Bad(prevBlock)) {
1776 #if PRECISE_EXC_CONTEXT
1777 if (get_opt_precise_exc_context() &&
1778 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): the call inside assert() vanishes in NDEBUG builds;
   the assignment on the next line is the one that matters. */
1779 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1780 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1783 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1785 nin[i-1] = new_Bad();
1789 /* We want to pass the Phi0 node to the constructor: this finds additional
1790 optimization possibilities.
1791 The Phi0 node either is allocated in this function, or it comes from
1792 a former call to get_r_value_internal. In this case we may not yet
1793 exchange phi0, as this is done in mature_immBlock. */
1795 phi0_all = block->attr.block.graph_arr[pos];
1796 if (!((get_irn_op(phi0_all) == op_Phi) &&
1797 (get_irn_arity(phi0_all) == 0) &&
1798 (get_nodes_block(phi0_all) == block)))
1804 /* After collecting all predecessors into the array nin a new Phi node
1805 with these predecessors is created. This constructor contains an
1806 optimization: If all predecessors of the Phi node are identical it
1807 returns the only operand instead of a new Phi node. */
1808 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1810 /* In case we allocated a Phi0 node at the beginning of this procedure,
1811 we need to exchange this Phi0 with the real Phi. */
1813 exchange(phi0, res);
1814 block->attr.block.graph_arr[pos] = res;
1815 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1816 only an optimization. */
1822 /* This function returns the last definition of a variable. In case
1823 this variable was last defined in a previous block, Phi nodes are
1824 inserted. If the part of the firm graph containing the definition
1825 is not yet constructed, a dummy Phi node is returned. */
/* Default-variant reaching definition lookup: same four cases as the
   fast variant, but case 4 returns the Phi0 that phi_merge planted
   (so the result is always a defined node, never NULL). */
1827 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1830 /* There are 4 cases to treat.
1832 1. The block is not mature and we visit it the first time. We can not
1833 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1834 predecessors is returned. This node is added to the linked list (field
1835 "link") of the containing block to be completed when this block is
1836 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1839 2. The value is already known in this block, graph_arr[pos] is set and we
1840 visit the block the first time. We can return the value without
1841 creating any new nodes.
1843 3. The block is mature and we visit it the first time. A Phi node needs
1844 to be created (phi_merge). If the Phi is not needed, as all it's
1845 operands are the same value reaching the block through different
1846 paths, it's optimized away and the value itself is returned.
1848 4. The block is mature, and we visit it the second time. Now two
1849 subcases are possible:
1850 * The value was computed completely the last time we were here. This
1851 is the case if there is no loop. We can return the proper value.
1852 * The recursion that visited this node and set the flag did not
1853 return yet. We are computing a value in a loop and need to
1854 break the recursion. This case only happens if we visited
1855 the same block with phi_merge before, which inserted a Phi0.
1856 So we return the Phi0.
1859 /* case 4 -- already visited. */
1860 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1861 /* As phi_merge allocates a Phi0 this value is always defined. Here
1862 is the critical difference of the two algorithms. */
1863 assert(block->attr.block.graph_arr[pos]);
1864 return block->attr.block.graph_arr[pos];
1867 /* visited the first time */
1868 set_irn_visited(block, get_irg_visited(current_ir_graph));
1870 /* Get the local valid value */
1871 res = block->attr.block.graph_arr[pos];
1873 /* case 2 -- If the value is actually computed, return it. */
1874 if (res) { return res; };
1876 if (block->attr.block.matured) { /* case 3 */
1878 /* The Phi has the same amount of ins as the corresponding block. */
1879 int ins = get_irn_arity(block);
1881 NEW_ARR_A (ir_node *, nin, ins);
1883 /* Phi merge collects the predecessors and then creates a node. */
1884 res = phi_merge (block, pos, mode, nin, ins);
1886 } else { /* case 1 */
1887 /* The block is not mature, we don't know how many in's are needed. A Phi
1888 with zero predecessors is created. Such a Phi node is called Phi0
1889 node. The Phi0 is then added to the list of Phi0 nodes in this block
1890 to be matured by mature_immBlock later.
1891 The Phi0 has to remember the pos of it's internal value. If the real
1892 Phi is computed, pos is used to update the array with the local
1894 res = new_rd_Phi0 (current_ir_graph, block, mode);
1895 res->attr.phi0_pos = pos;
1896 res->link = block->link;
1900 /* If we get here, the frontend missed a use-before-definition error */
1903 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1904 assert (mode->code >= irm_F && mode->code <= irm_P);
1905 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1906 get_mode_null(mode));
1909 /* The local valid value is available now. */
1910 block->attr.block.graph_arr[pos] = res;
1915 #endif /* USE_FAST_PHI_CONSTRUCTION */
1917 /* ************************************************************************** */
1919 /** Finalize a Block node, when all control flows are known. */
1920 /** Acceptable parameters are only Block nodes. */
/* Finalize an immature Block once all control-flow predecessors are known:
   fix the backedge array, upgrade every queued Phi0 to a real Phi via
   phi_merge, mark the block matured and optimize it in place. */
1922 mature_immBlock (ir_node *block)
1929 assert (get_irn_opcode(block) == iro_Block);
1930 /* @@@ should be commented in
1931 assert (!get_Block_matured(block) && "Block already matured"); */
1933 if (!get_Block_matured(block)) {
/* Arity: in[0] is the block's own slot, so predecessors = len - 1. */
1934 ins = ARR_LEN (block->in)-1;
1935 /* Fix block parameters */
1936 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1938 /* An array for building the Phi nodes. */
1939 NEW_ARR_A (ir_node *, nin, ins);
1941 /* Traverse a chain of Phi nodes attached to this block and mature
1943 for (n = block->link; n; n=next) {
1944 inc_irg_visited(current_ir_graph);
1946 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1949 block->attr.block.matured = 1;
1951 /* Now, as the block is a finished firm node, we can optimize it.
1952 Since other nodes have been allocated since the block was created
1953 we can not free the node on the obstack. Therefore we have to call
1955 Unfortunately the optimization does not change a lot, as all allocated
1956 nodes refer to the unoptimized node.
1957 We can call _2, as global cse has no effect on blocks. */
1958 block = optimize_in_place_2(block);
1959 IRN_VRFY_IRG(block, current_ir_graph);
/* new_d_* wrappers: thin adapters around the new_rd_* constructors that
   supply current_ir_graph and its current (or start) block implicitly. */
1964 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1966 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
/* Constants are placed in the start block, not the current block. */
1971 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1973 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1978 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1980 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1986 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1988 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1993 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1995 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* default Proj of a Cond: also marks the Cond as `fragmentary` and
   records max_proj as its default projection number. */
2000 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2003 assert(arg->op == op_Cond);
2004 arg->attr.c.kind = fragmentary;
2005 arg->attr.c.default_proj = max_proj;
2006 res = new_Proj (arg, mode_X, max_proj);
2011 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2013 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2018 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2020 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2024 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2026 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2031 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2033 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2038 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2040 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2046 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2048 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2053 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2055 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2060 * allocate the frag array
/* Allocate a frag array for `res` (expected opcode `op`) into *frag_store,
   but only while building with precise exception context, and only if the
   node was not optimized away or already carries one (cse). */
2062 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2063 if (get_opt_precise_exc_context()) {
2064 if ((current_ir_graph->phase_state == phase_building) &&
2065 (get_irn_op(res) == op) && /* Could be optimized away. */
2066 !*frag_store) /* Could be a cse where the arr is already set. */ {
2067 *frag_store = new_frag_arr(res);
/* Division-family wrappers: fragile ops — pinned, and equipped with a
   frag array when precise exception context is enabled. */
2074 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2077 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2079 res->attr.except.pin_state = op_pin_state_pinned;
2080 #if PRECISE_EXC_CONTEXT
2081 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2088 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2091 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2093 res->attr.except.pin_state = op_pin_state_pinned;
2094 #if PRECISE_EXC_CONTEXT
2095 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2102 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2105 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2107 res->attr.except.pin_state = op_pin_state_pinned;
2108 #if PRECISE_EXC_CONTEXT
2109 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2116 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2119 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2121 res->attr.except.pin_state = op_pin_state_pinned;
2122 #if PRECISE_EXC_CONTEXT
2123 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Bitwise / shift / compare wrappers — pure delegation to new_rd_*. */
2130 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2132 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2137 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2139 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2144 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2146 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2151 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2153 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2158 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2160 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2165 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2167 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2172 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2174 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2179 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2181 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2186 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2188 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2193 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2195 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
/* Control-flow and memory-op wrappers; the fragile ones (Call, Load,
   Store, Alloc) additionally allocate a frag array under
   PRECISE_EXC_CONTEXT. */
2200 new_d_Jmp (dbg_info* db)
2202 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2206 new_d_Cond (dbg_info* db, ir_node *c)
2208 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2212 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2216 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2217 store, callee, arity, in, tp);
2218 #if PRECISE_EXC_CONTEXT
2219 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2226 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2228 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2233 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2235 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2240 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2243 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2245 #if PRECISE_EXC_CONTEXT
2246 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2253 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2256 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2258 #if PRECISE_EXC_CONTEXT
2259 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2266 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2270 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2271 store, size, alloc_type, where);
2272 #if PRECISE_EXC_CONTEXT
2273 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2280 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2282 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2283 store, ptr, size, free_type);
2287 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2288 /* GL: objptr was called frame before. Frame was a bad choice for the name
2289 as the operand could as well be a pointer to a dynamic object. */
2291 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2292 store, objptr, 0, NULL, ent);
2296 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2298 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2299 store, objptr, n_index, index, sel);
/* Remaining wrappers: type tests, symbolic constants (start block!),
   interprocedural-view nodes and Mux. */
2303 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2305 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2306 store, objptr, ent));
2310 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2312 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2317 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2319 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2324 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2326 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2334 return __new_d_Bad();
2338 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2340 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2345 new_d_Unknown (ir_mode *m)
2347 return new_rd_Unknown(current_ir_graph, m);
2351 new_d_CallBegin (dbg_info *db, ir_node *call)
2354 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2359 new_d_EndReg (dbg_info *db)
2362 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2367 new_d_EndExcept (dbg_info *db)
2370 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2375 new_d_Break (dbg_info *db)
2377 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2381 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2383 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2390 return __new_d_NoMem();
2394 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2395 ir_node *ir_true, ir_mode *mode) {
2396 return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2397 sel, ir_false, ir_true, mode);
2400 /* ********************************************************************* */
2401 /* Comfortable interface with automatic Phi node construction. */
2402 /* (Uses also constructors of ?? interface, except new_Block. */
2403 /* ********************************************************************* */
2405 /* * Block construction **/
2406 /* immature Block without predecessors */
/* Create an immature Block (predecessor count still unknown, dynamic
   in-array), make it the current block, and set up the graph_arr used
   for on-the-fly Phi construction. Must not be optimized until matured. */
2407 ir_node *new_d_immBlock (dbg_info* db) {
2410 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2411 /* creates a new dynamic in-array as length of in is -1 */
2412 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2413 current_ir_graph->current_block = res;
2414 res->attr.block.matured = 0;
2415 res->attr.block.dead = 0;
2416 /* res->attr.block.exc = exc_normal; */
2417 /* res->attr.block.handler_entry = 0; */
2418 res->attr.block.irg = current_ir_graph;
2419 res->attr.block.backedge = NULL;
2420 res->attr.block.in_cg = NULL;
2421 res->attr.block.cg_backedge = NULL;
2422 set_Block_block_visited(res, 0);
2424 /* Create and initialize array for Phi-node construction. */
2425 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2426 current_ir_graph->n_loc);
2427 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2429 /* Immature block may not be optimized! */
2430 IRN_VRFY_IRG(res, current_ir_graph);
2436 new_immBlock (void) {
2437 return new_d_immBlock(NULL);
/* add an edge to a jmp/control flow node */
2442 add_immBlock_pred (ir_node *block, ir_node *jmp)
2444 if (block->attr.block.matured) {
2445 assert(0 && "Error: Block already matured!\n");
2448 assert(jmp != NULL);
2449 ARR_APP1(ir_node *, block->in, jmp);
2453 /* changing the current block */
2455 set_cur_block (ir_node *target)
2457 current_ir_graph->current_block = target;
2460 /* ************************ */
2461 /* parameter administration */
2463 /* get a value from the parameter array from the current block by its index */
2465 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2467 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2468 inc_irg_visited(current_ir_graph);
2470 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2472 /* get a value from the parameter array from the current block by its index */
2474 get_value (int pos, ir_mode *mode)
2476 return get_d_value(NULL, pos, mode);
2479 /* set a value at position pos in the parameter array from the current block */
2481 set_value (int pos, ir_node *value)
2483 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2484 assert(pos+1 < current_ir_graph->n_loc);
2485 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2488 /* get the current store */
2492 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2493 /* GL: one could call get_value instead */
2494 inc_irg_visited(current_ir_graph);
2495 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2498 /* set the current store */
2500 set_store (ir_node *store)
2502 /* GL: one could call set_value instead */
2503 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2504 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2508 keep_alive (ir_node *ka)
2510 add_End_keepalive(current_ir_graph->end, ka);
2513 /** Useful access routines **/
2514 /* Returns the current block of the current graph. To set the current
2515 block use set_cur_block. */
2516 ir_node *get_cur_block() {
2517 return get_irg_current_block(current_ir_graph);
2520 /* Returns the frame type of the current graph */
2521 type *get_cur_frame_type() {
2522 return get_irg_frame_type(current_ir_graph);
2526 /* ********************************************************************* */
2529 /* call once for each run of the library */
2531 init_cons (default_initialize_local_variable_func_t *func)
2533 default_initialize_local_variable = func;
2536 /* call for each graph */
2538 finalize_cons (ir_graph *irg) {
2539 irg->phase_state = phase_high;
2543 ir_node *new_Block(int arity, ir_node **in) {
2544 return new_d_Block(NULL, arity, in);
2546 ir_node *new_Start (void) {
2547 return new_d_Start(NULL);
2549 ir_node *new_End (void) {
2550 return new_d_End(NULL);
2552 ir_node *new_Jmp (void) {
2553 return new_d_Jmp(NULL);
2555 ir_node *new_Cond (ir_node *c) {
2556 return new_d_Cond(NULL, c);
2558 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2559 return new_d_Return(NULL, store, arity, in);
2561 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2562 return new_d_Raise(NULL, store, obj);
2564 ir_node *new_Const (ir_mode *mode, tarval *con) {
2565 return new_d_Const(NULL, mode, con);
2568 ir_node *new_Const_type(tarval *con, type *tp) {
2569 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2572 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2573 return new_d_SymConst(NULL, value, kind);
2575 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2576 return new_d_simpleSel(NULL, store, objptr, ent);
2578 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2580 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2582 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2583 return new_d_InstOf (NULL, store, objptr, ent);
2585 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2587 return new_d_Call(NULL, store, callee, arity, in, tp);
2589 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2590 return new_d_Add(NULL, op1, op2, mode);
2592 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2593 return new_d_Sub(NULL, op1, op2, mode);
2595 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2596 return new_d_Minus(NULL, op, mode);
2598 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2599 return new_d_Mul(NULL, op1, op2, mode);
2601 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2602 return new_d_Quot(NULL, memop, op1, op2);
2604 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2605 return new_d_DivMod(NULL, memop, op1, op2);
2607 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2608 return new_d_Div(NULL, memop, op1, op2);
2610 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2611 return new_d_Mod(NULL, memop, op1, op2);
2613 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2614 return new_d_Abs(NULL, op, mode);
2616 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2617 return new_d_And(NULL, op1, op2, mode);
2619 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2620 return new_d_Or(NULL, op1, op2, mode);
2622 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2623 return new_d_Eor(NULL, op1, op2, mode);
2625 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2626 return new_d_Not(NULL, op, mode);
2628 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2629 return new_d_Shl(NULL, op, k, mode);
2631 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2632 return new_d_Shr(NULL, op, k, mode);
2634 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2635 return new_d_Shrs(NULL, op, k, mode);
2637 #define new_Rotate new_Rot
2638 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2639 return new_d_Rot(NULL, op, k, mode);
2641 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2642 return new_d_Cmp(NULL, op1, op2);
2644 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2645 return new_d_Conv(NULL, op, mode);
2647 ir_node *new_Cast (ir_node *op, type *to_tp) {
2648 return new_d_Cast(NULL, op, to_tp);
2650 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2651 return new_d_Phi(NULL, arity, in, mode);
2653 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2654 return new_d_Load(NULL, store, addr, mode);
2656 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2657 return new_d_Store(NULL, store, addr, val);
2659 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2660 where_alloc where) {
2661 return new_d_Alloc(NULL, store, size, alloc_type, where);
2663 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2665 return new_d_Free(NULL, store, ptr, size, free_type);
2667 ir_node *new_Sync (int arity, ir_node **in) {
2668 return new_d_Sync(NULL, arity, in);
2670 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2671 return new_d_Proj(NULL, arg, mode, proj);
2673 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2674 return new_d_defaultProj(NULL, arg, max_proj);
2676 ir_node *new_Tuple (int arity, ir_node **in) {
2677 return new_d_Tuple(NULL, arity, in);
2679 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2680 return new_d_Id(NULL, val, mode);
2682 ir_node *new_Bad (void) {
2685 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2686 return new_d_Confirm (NULL, val, bound, cmp);
2688 ir_node *new_Unknown(ir_mode *m) {
2689 return new_d_Unknown(m);
2691 ir_node *new_CallBegin (ir_node *callee) {
2692 return new_d_CallBegin(NULL, callee);
2694 ir_node *new_EndReg (void) {
2695 return new_d_EndReg(NULL);
2697 ir_node *new_EndExcept (void) {
2698 return new_d_EndExcept(NULL);
2700 ir_node *new_Break (void) {
2701 return new_d_Break(NULL);
2703 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2704 return new_d_Filter(NULL, arg, mode, proj);
2706 ir_node *new_NoMem (void) {
2707 return new_d_NoMem();
2709 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2710 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);