 * File name:   ir/ir/ircons.c
 * Purpose:     Various irnode constructors. Automatic construction
 *              of SSA representation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier, Boris Boesler
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
41 #if USE_EXPLICIT_PHI_IN_STACK
42 /* A stack needed for the automatic Phi node construction in constructor
43 Phi_in. Redefinition in irgraph.c!! */
48 typedef struct Phi_in_stack Phi_in_stack;
/* when we need verifying */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
#endif
 * language-dependent initialization variable
61 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* -------------------------------------------- */
/* private interfaces, for professional use only */
/* -------------------------------------------- */
/* Constructs a Block with a fixed number of predecessors.
   Does not set current_block. Cannot be used with automatic
   Phi node construction. */
71 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
87 IRN_VRFY_IRG(res, irg);
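
/* Usage sketch (editor's illustration, not part of the original code):
   constructing a block whose control flow predecessors are already known.
   The nodes jmp1/jmp2 are assumed to be mode_X nodes built earlier.

     ir_node *in[2] = { jmp1, jmp2 };
     ir_node *bl    = new_rd_Block(NULL, irg, 2, in);

   Such a block is created matured: Phi nodes for it must be built
   explicitly (see new_rd_Phi below); the automatic construction via
   get_value/set_value is not available. */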
92 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
96 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
97 /* res->attr.start.irg = irg; */
99 IRN_VRFY_IRG(res, irg);
104 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
108 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
110 IRN_VRFY_IRG(res, irg);
114 /* Creates a Phi node with all predecessors. Calling this constructor
115 is only allowed if the corresponding block is mature. */
117 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
121 bool has_unknown = false;
  /* Don't assert that block matured: the use of this constructor is strongly
     restricted. */
125 if ( get_Block_matured(block) )
126 assert( get_irn_arity(block) == arity );
128 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
130 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {
      has_unknown = true;
      break;
    }
138 if (!has_unknown) res = optimize_node (res);
139 IRN_VRFY_IRG(res, irg);
141 /* Memory Phis in endless loops must be kept alive.
142 As we can't distinguish these easily we keep all of them alive. */
143 if ((res->op == op_Phi) && (mode == mode_M))
144 add_End_keepalive(irg->end, res);
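
/* Usage sketch (editor's illustration): a Phi merging two values in a
   mature block bl with two control flow predecessors; v1 and v2 are
   assumptions.

     ir_node *ins[2] = { v1, v2 };   /* operand i belongs to predecessor i */
     ir_node *phi    = new_rd_Phi(NULL, irg, bl, 2, ins, mode_Is);

   The arity must match the block's arity, as asserted above. */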
149 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
153 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
154 res->attr.con.tv = con;
155 set_Const_type(res, tp); /* Call method because of complex assertion. */
156 res = optimize_node (res);
157 assert(get_Const_type(res) == tp);
158 IRN_VRFY_IRG(res, irg);
164 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
166 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
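
/* Usage sketch (editor's illustration, assuming new_tarval_from_long()
   from the tarval module):

     tarval  *tv = new_tarval_from_long(42, mode_Is);
     ir_node *c  = new_rd_Const(NULL, irg, bl, mode_Is, tv);

   Note that the constructor ignores the block argument and places all
   Const nodes in the start block (see the use of irg->start_block above). */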
170 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
174 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
175 res = optimize_node(res);
176 IRN_VRFY_IRG(res, irg);
181 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
186 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
187 res->attr.proj = proj;
190 assert(get_Proj_pred(res));
191 assert(get_nodes_block(get_Proj_pred(res)));
193 res = optimize_node(res);
195 IRN_VRFY_IRG(res, irg);
201 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
205 assert(arg->op == op_Cond);
206 arg->attr.c.kind = fragmentary;
207 arg->attr.c.default_proj = max_proj;
208 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
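
/* Usage sketch (editor's illustration): a two-way branch. The selector sel
   is assumed to be a mode_b node; the projection numbers 0/1 for the
   false/true exits of a boolean Cond are assumptions.

     ir_node *cond = new_rd_Cond(NULL, irg, bl, sel);
     ir_node *f    = new_rd_Proj(NULL, irg, bl, cond, mode_X, 0); /* false exit */
     ir_node *t    = new_rd_Proj(NULL, irg, bl, cond, mode_X, 1); /* true exit  */

   For switch-like Conds over integer selectors, new_rd_defaultProj above
   marks the Cond as fragmentary and creates the default exit. */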
213 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
217 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
218 res = optimize_node(res);
219 IRN_VRFY_IRG(res, irg);
224 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
228 assert(is_atomic_type(to_tp));
230 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
231 res->attr.cast.totype = to_tp;
232 res = optimize_node(res);
233 IRN_VRFY_IRG(res, irg);
238 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
242 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
243 res = optimize_node (res);
244 IRN_VRFY_IRG(res, irg);
249 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
250 ir_node *op1, ir_node *op2, ir_mode *mode)
257 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
258 res = optimize_node(res);
259 IRN_VRFY_IRG(res, irg);
264 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
265 ir_node *op1, ir_node *op2, ir_mode *mode)
272 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
273 res = optimize_node (res);
274 IRN_VRFY_IRG(res, irg);
279 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
280 ir_node *op, ir_mode *mode)
284 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
285 res = optimize_node(res);
286 IRN_VRFY_IRG(res, irg);
291 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
292 ir_node *op1, ir_node *op2, ir_mode *mode)
299 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
300 res = optimize_node(res);
301 IRN_VRFY_IRG(res, irg);
306 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
307 ir_node *memop, ir_node *op1, ir_node *op2)
315 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
316 res = optimize_node(res);
317 IRN_VRFY_IRG(res, irg);
322 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
323 ir_node *memop, ir_node *op1, ir_node *op2)
331 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
332 res = optimize_node(res);
333 IRN_VRFY_IRG(res, irg);
338 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
339 ir_node *memop, ir_node *op1, ir_node *op2)
347 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
354 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
355 ir_node *memop, ir_node *op1, ir_node *op2)
363 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
371 ir_node *op1, ir_node *op2, ir_mode *mode)
378 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
379 res = optimize_node(res);
380 IRN_VRFY_IRG(res, irg);
385 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
386 ir_node *op1, ir_node *op2, ir_mode *mode)
393 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
394 res = optimize_node(res);
395 IRN_VRFY_IRG(res, irg);
400 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op1, ir_node *op2, ir_mode *mode)
408 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
409 res = optimize_node (res);
410 IRN_VRFY_IRG(res, irg);
415 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
416 ir_node *op, ir_mode *mode)
420 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
427 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
428 ir_node *op, ir_node *k, ir_mode *mode)
435 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
436 res = optimize_node(res);
437 IRN_VRFY_IRG(res, irg);
442 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
443 ir_node *op, ir_node *k, ir_mode *mode)
450 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
451 res = optimize_node(res);
452 IRN_VRFY_IRG(res, irg);
457 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *op, ir_node *k, ir_mode *mode)
465 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
466 res = optimize_node(res);
467 IRN_VRFY_IRG(res, irg);
472 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
473 ir_node *op, ir_node *k, ir_mode *mode)
480 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
481 res = optimize_node(res);
482 IRN_VRFY_IRG(res, irg);
487 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
488 ir_node *op, ir_mode *mode)
492 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
493 res = optimize_node (res);
494 IRN_VRFY_IRG(res, irg);
499 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
500 ir_node *op1, ir_node *op2)
507 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
514 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
518 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
519 res = optimize_node (res);
520 IRN_VRFY_IRG (res, irg);
525 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
529 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
530 res->attr.c.kind = dense;
531 res->attr.c.default_proj = 0;
532 res = optimize_node (res);
533 IRN_VRFY_IRG(res, irg);
538 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
539 ir_node *callee, int arity, ir_node **in, type *tp)
546 NEW_ARR_A(ir_node *, r_in, r_arity);
549 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
551 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
553 assert((get_unknown_type() == tp) || is_Method_type(tp));
554 set_Call_type(res, tp);
555 res->attr.call.exc.pin_state = op_pin_state_pinned;
556 res->attr.call.callee_arr = NULL;
557 res = optimize_node(res);
558 IRN_VRFY_IRG(res, irg);
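
/* Usage sketch (editor's illustration): a call with two arguments; mem,
   callee_ptr, a, b and the method type mtp are assumptions, as is the
   projection number pn_Call_M_regular for the regular memory result.

     ir_node *args[2] = { a, b };
     ir_node *call = new_rd_Call(NULL, irg, bl, mem, callee_ptr, 2, args, mtp);
     ir_node *m    = new_rd_Proj(NULL, irg, bl, call, mode_M, pn_Call_M_regular);

   Internally the predecessor array is laid out as
   { store, callee, arg0, ..., arg(n-1) }; cf. the memcpy into &r_in[2]. */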
563 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
564 ir_node *store, int arity, ir_node **in)
571 NEW_ARR_A (ir_node *, r_in, r_arity);
573 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
574 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
575 res = optimize_node(res);
576 IRN_VRFY_IRG(res, irg);
581 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
588 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
595 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
596 ir_node *store, ir_node *adr, ir_mode *mode)
603 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
604 res->attr.load.exc.pin_state = op_pin_state_pinned;
605 res->attr.load.load_mode = mode;
606 res->attr.load.volatility = volatility_non_volatile;
607 res = optimize_node(res);
608 IRN_VRFY_IRG(res, irg);
613 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
614 ir_node *store, ir_node *adr, ir_node *val)
622 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
623 res->attr.store.exc.pin_state = op_pin_state_pinned;
624 res->attr.store.volatility = volatility_non_volatile;
625 res = optimize_node(res);
626 IRN_VRFY_IRG(res, irg);
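
/* Usage sketch (editor's illustration): a load followed by a store through
   the same pointer, with the memory state threaded through Proj nodes.
   mem and ptr are assumptions; pn_Load_res is an assumption, while
   pn_Load_M/pn_Store_M are referenced elsewhere in this file.

     ir_node *ld = new_rd_Load (NULL, irg, bl, mem, ptr, mode_Is);
     ir_node *m1 = new_rd_Proj (NULL, irg, bl, ld, mode_M,  pn_Load_M);
     ir_node *v  = new_rd_Proj (NULL, irg, bl, ld, mode_Is, pn_Load_res);
     ir_node *st = new_rd_Store(NULL, irg, bl, m1, ptr, v);
     ir_node *m2 = new_rd_Proj (NULL, irg, bl, st, mode_M,  pn_Store_M); */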
631 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
632 ir_node *size, type *alloc_type, where_alloc where)
639 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
640 res->attr.a.exc.pin_state = op_pin_state_pinned;
641 res->attr.a.where = where;
642 res->attr.a.type = alloc_type;
643 res = optimize_node(res);
644 IRN_VRFY_IRG(res, irg);
649 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
650 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
658 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
659 res->attr.f.where = where;
660 res->attr.f.type = free_type;
661 res = optimize_node(res);
662 IRN_VRFY_IRG(res, irg);
667 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
668 int arity, ir_node **in, entity *ent)
674 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
677 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
680 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
681 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
682 res->attr.s.ent = ent;
683 res = optimize_node(res);
684 IRN_VRFY_IRG(res, irg);
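
/* Usage sketch (editor's illustration): selecting a field from an object
   pointer. With no index operands the predecessor array degenerates to
   { store, objptr }. mem, obj and the entity fld are assumptions.

     ir_node *sel = new_rd_Sel(NULL, irg, bl, mem, obj, 0, NULL, fld);

   The result is a machine pointer (mode_P_mach, see above). */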
689 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
690 ir_node *objptr, type *ent)
697 NEW_ARR_A(ir_node *, r_in, r_arity);
701 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
702 res->attr.io.ent = ent;
704 /* res = optimize(res); */
705 IRN_VRFY_IRG(res, irg);
710 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
711 symconst_kind symkind, type *tp) {
715 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
720 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
722 res->attr.i.num = symkind;
723 res->attr.i.sym = value;
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
732 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
733 symconst_kind symkind)
735 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
739 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
740 symconst_symbol sym = {(type *)symbol};
741 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
744 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
745 symconst_symbol sym = {(type *)symbol};
746 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
749 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
750 symconst_symbol sym = {symbol};
751 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
754 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
755 symconst_symbol sym = {symbol};
756 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
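
/* Usage sketch (editor's illustration): taking the address of an entity,
   using the same union-initialization trick as the helpers above; ent is
   an assumption.

     symconst_symbol sym = {(type *)ent};
     ir_node *addr = new_rd_SymConst(NULL, irg, bl, sym, symconst_addr_ent);

   The new_rd_SymConst_addr_ent wrapper above does this for the caller,
   additionally attaching a type and picking the start block. */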
760 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
764 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
765 res = optimize_node(res);
766 IRN_VRFY_IRG(res, irg);
771 new_rd_Bad (ir_graph *irg)
777 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
779 ir_node *in[2], *res;
783 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
784 res->attr.confirm_cmp = cmp;
785 res = optimize_node (res);
786 IRN_VRFY_IRG(res, irg);
791 new_rd_Unknown (ir_graph *irg, ir_mode *m)
793 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
797 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
802 in[0] = get_Call_ptr(call);
803 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
804 /* res->attr.callbegin.irg = irg; */
805 res->attr.callbegin.call = call;
806 res = optimize_node(res);
807 IRN_VRFY_IRG(res, irg);
812 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
816 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
818 IRN_VRFY_IRG(res, irg);
823 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
827 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
828 irg->end_except = res;
829 IRN_VRFY_IRG (res, irg);
834 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
838 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
839 res = optimize_node(res);
840 IRN_VRFY_IRG(res, irg);
845 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
850 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
851 res->attr.filter.proj = proj;
852 res->attr.filter.in_cg = NULL;
853 res->attr.filter.backedge = NULL;
856 assert(get_Proj_pred(res));
857 assert(get_nodes_block(get_Proj_pred(res)));
859 res = optimize_node(res);
860 IRN_VRFY_IRG(res, irg);
865 new_rd_NoMem (ir_graph *irg) {
870 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
871 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
880 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
883 res = optimize_node(res);
884 IRN_VRFY_IRG(res, irg);
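
/* Usage sketch (editor's illustration): a conditional select without
   control flow. sel (a mode_b node), a and b are assumptions; note the
   operand order sel, ir_false, ir_true.

     ir_node *mux = new_rd_Mux(NULL, irg, bl, sel, b, a, mode_Is);
     /* yields a if sel is true, else b */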
889 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
890 return new_rd_Block(NULL, irg, arity, in);
892 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
893 return new_rd_Start(NULL, irg, block);
895 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
896 return new_rd_End(NULL, irg, block);
898 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
899 return new_rd_Jmp(NULL, irg, block);
901 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
902 return new_rd_Cond(NULL, irg, block, c);
904 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
905 ir_node *store, int arity, ir_node **in) {
906 return new_rd_Return(NULL, irg, block, store, arity, in);
908 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
909 ir_node *store, ir_node *obj) {
910 return new_rd_Raise(NULL, irg, block, store, obj);
912 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
913 ir_mode *mode, tarval *con) {
914 return new_rd_Const(NULL, irg, block, mode, con);
916 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
917 symconst_symbol value, symconst_kind symkind) {
918 return new_rd_SymConst(NULL, irg, block, value, symkind);
920 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
921 ir_node *objptr, int n_index, ir_node **index,
923 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
925 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
927 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
929 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
930 ir_node *callee, int arity, ir_node **in,
932 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
934 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
935 ir_node *op1, ir_node *op2, ir_mode *mode) {
936 return new_rd_Add(NULL, irg, block, op1, op2, mode);
938 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
939 ir_node *op1, ir_node *op2, ir_mode *mode) {
940 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
942 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
943 ir_node *op, ir_mode *mode) {
944 return new_rd_Minus(NULL, irg, block, op, mode);
946 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
947 ir_node *op1, ir_node *op2, ir_mode *mode) {
948 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
950 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
951 ir_node *memop, ir_node *op1, ir_node *op2) {
952 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
954 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
955 ir_node *memop, ir_node *op1, ir_node *op2) {
956 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
958 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
959 ir_node *memop, ir_node *op1, ir_node *op2) {
960 return new_rd_Div(NULL, irg, block, memop, op1, op2);
962 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
963 ir_node *memop, ir_node *op1, ir_node *op2) {
964 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
966 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
967 ir_node *op, ir_mode *mode) {
968 return new_rd_Abs(NULL, irg, block, op, mode);
970 ir_node *new_r_And (ir_graph *irg, ir_node *block,
971 ir_node *op1, ir_node *op2, ir_mode *mode) {
972 return new_rd_And(NULL, irg, block, op1, op2, mode);
974 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
975 ir_node *op1, ir_node *op2, ir_mode *mode) {
976 return new_rd_Or(NULL, irg, block, op1, op2, mode);
978 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
979 ir_node *op1, ir_node *op2, ir_mode *mode) {
980 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
982 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
983 ir_node *op, ir_mode *mode) {
984 return new_rd_Not(NULL, irg, block, op, mode);
986 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
987 ir_node *op1, ir_node *op2) {
988 return new_rd_Cmp(NULL, irg, block, op1, op2);
990 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
991 ir_node *op, ir_node *k, ir_mode *mode) {
992 return new_rd_Shl(NULL, irg, block, op, k, mode);
994 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
995 ir_node *op, ir_node *k, ir_mode *mode) {
996 return new_rd_Shr(NULL, irg, block, op, k, mode);
998 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
999 ir_node *op, ir_node *k, ir_mode *mode) {
1000 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1002 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1003 ir_node *op, ir_node *k, ir_mode *mode) {
1004 return new_rd_Rot(NULL, irg, block, op, k, mode);
1006 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1007 ir_node *op, ir_mode *mode) {
1008 return new_rd_Conv(NULL, irg, block, op, mode);
1010 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1011 return new_rd_Cast(NULL, irg, block, op, to_tp);
1013 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1014 ir_node **in, ir_mode *mode) {
1015 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1017 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1018 ir_node *store, ir_node *adr, ir_mode *mode) {
1019 return new_rd_Load(NULL, irg, block, store, adr, mode);
1021 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1022 ir_node *store, ir_node *adr, ir_node *val) {
1023 return new_rd_Store(NULL, irg, block, store, adr, val);
1025 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1026 ir_node *size, type *alloc_type, where_alloc where) {
1027 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1029 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1030 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1031 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1033 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1034 return new_rd_Sync(NULL, irg, block, arity, in);
1036 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1037 ir_mode *mode, long proj) {
1038 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1040 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1042 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1044 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1045 int arity, ir_node **in) {
1046 return new_rd_Tuple(NULL, irg, block, arity, in );
1048 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1049 ir_node *val, ir_mode *mode) {
1050 return new_rd_Id(NULL, irg, block, val, mode);
1052 ir_node *new_r_Bad (ir_graph *irg) {
1053 return new_rd_Bad(irg);
1055 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1056 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1058 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1059 return new_rd_Unknown(irg, m);
1061 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1062 return new_rd_CallBegin(NULL, irg, block, callee);
1064 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1065 return new_rd_EndReg(NULL, irg, block);
1067 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1068 return new_rd_EndExcept(NULL, irg, block);
1070 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1071 return new_rd_Break(NULL, irg, block);
1073 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1074 ir_mode *mode, long proj) {
1075 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1077 ir_node *new_r_NoMem (ir_graph *irg) {
1078 return new_rd_NoMem(irg);
1080 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1081 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1082 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1086 /** ********************/
1087 /** public interfaces */
1088 /** construction tools */
1092 * - create a new Start node in the current block
1094 * @return s - pointer to the created Start node
1099 new_d_Start (dbg_info* db)
1103 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1104 op_Start, mode_T, 0, NULL);
1105 /* res->attr.start.irg = current_ir_graph; */
1107 res = optimize_node(res);
1108 IRN_VRFY_IRG(res, current_ir_graph);
1113 new_d_End (dbg_info* db)
1116 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1117 op_End, mode_X, -1, NULL);
1118 res = optimize_node(res);
1119 IRN_VRFY_IRG(res, current_ir_graph);
1124 /* Constructs a Block with a fixed number of predecessors.
1125 Does set current_block. Can be used with automatic Phi
1126 node construction. */
1128 new_d_Block (dbg_info* db, int arity, ir_node **in)
1132 bool has_unknown = false;
1134 res = new_rd_Block(db, current_ir_graph, arity, in);
1136 /* Create and initialize array for Phi-node construction. */
1137 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1138 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1139 current_ir_graph->n_loc);
1140 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {
      has_unknown = true;
      break;
    }
1149 if (!has_unknown) res = optimize_node(res);
1150 current_ir_graph->current_block = res;
1152 IRN_VRFY_IRG(res, current_ir_graph);
1157 /* ***********************************************************************/
1158 /* Methods necessary for automatic Phi node creation */
1160 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1161 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1162 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1163 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
   Call Graph:   ( A ---> B  ==  A "calls" B )

     get_value                 mature_immBlock
         |                           |
         v                           v
     get_r_value_internal  <--->  phi_merge
         |                           |
         v                           v
     new_rd_Phi0               new_rd_Phi_in
1181 * *************************************************************************** */
1183 /** Creates a Phi node with 0 predecessors */
1184 static INLINE ir_node *
1185 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1189 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1190 IRN_VRFY_IRG(res, irg);
/* There are two implementations of the Phi node construction. The first
   is faster, but does not work for blocks with more than 2 predecessors.
   The second always works but is slower and causes more unnecessary Phi
   nodes.
   Select the implementation by the preprocessor flag
   USE_FAST_PHI_CONSTRUCTION below. */
1200 #if USE_FAST_PHI_CONSTRUCTION
/* This is a stack used for allocating and deallocating nodes in
   new_rd_Phi_in. The original implementation used the obstack
   to model this stack; now it is explicit. This reduces side effects. */
1206 #if USE_EXPLICIT_PHI_IN_STACK
1208 new_Phi_in_stack(void) {
1211 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1213 res->stack = NEW_ARR_F (ir_node *, 0);
1220 free_Phi_in_stack(Phi_in_stack *s) {
1221 DEL_ARR_F(s->stack);
1225 free_to_Phi_in_stack(ir_node *phi) {
1226 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1227 current_ir_graph->Phi_in_stack->pos)
1228 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
  else
    current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1232 (current_ir_graph->Phi_in_stack->pos)++;
1235 static INLINE ir_node *
1236 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1237 int arity, ir_node **in) {
1239 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1240 int pos = current_ir_graph->Phi_in_stack->pos;
1244 /* We need to allocate a new node */
    res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
1246 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1248 /* reuse the old node and initialize it again. */
1251 assert (res->kind == k_ir_node);
1252 assert (res->op == op_Phi);
1256 assert (arity >= 0);
    /* The old in array need not (and cannot) be freed: it lives on the
       obstack. */
1258 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1260 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1262 (current_ir_graph->Phi_in_stack->pos)--;
1266 #endif /* USE_EXPLICIT_PHI_IN_STACK */
/* Creates a Phi node with a given, fixed array **in of predecessors.
   If the Phi node is unnecessary, as the same value reaches the block
   through all control flow paths, it is eliminated and the value
   returned directly. This constructor is only intended for use in
   the automatic Phi node generation triggered by get_value or mature.
   The implementation is quite tricky and depends on the fact that
   the nodes are allocated on a stack:
   The in array contains predecessors and NULLs. The NULLs appear,
   if get_r_value_internal, which computed the predecessors, reached
   the same block on two paths. In this case the same value reaches
   this block on both paths; there is no definition in between. We need
   not allocate a Phi where these paths merge, but we have to communicate
   this fact to the caller. This happens by returning a pointer to the
   node the caller _will_ allocate. (Yes, we predict the address. We can
   do so because the nodes are allocated on the obstack.) The caller then
   finds a pointer to itself and, when this routine is called again,
   eliminates itself. */
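
/* Why the address prediction works (editor's illustration, relying only
   on standard obstack semantics): an obstack grows like a stack, so
   freeing the topmost object and allocating an object of the same size
   again returns the same address.

     void *a = obstack_alloc(obst, size);
     obstack_free(obst, a);               /* pops a and everything above it */
     void *b = obstack_alloc(obst, size); /* b == a                         */

   Hence a caller can "predict" the node it will allocate next. */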
1286 static INLINE ir_node *
1287 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1290 ir_node *res, *known;
1292 /* Allocate a new node on the obstack. This can return a node to
1293 which some of the pointers in the in-array already point.
1294 Attention: the constructor copies the in array, i.e., the later
1295 changes to the array in this routine do not affect the
1296 constructed node! If the in array contains NULLs, there will be
1297 missing predecessors in the returned node. Is this a possible
1298 internal state of the Phi node generation? */
1299 #if USE_EXPLICIT_PHI_IN_STACK
1300 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1302 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1303 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1306 /* The in-array can contain NULLs. These were returned by
1307 get_r_value_internal if it reached the same block/definition on a
1308 second path. The NULLs are replaced by the node itself to
1309 simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == NULL)
      in[i] = res;
  }
  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known) continue;

    if (known == res)
      known = in[i];
    else
      break;
  }
1329 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1331 #if USE_EXPLICIT_PHI_IN_STACK
1332 free_to_Phi_in_stack(res);
1334 obstack_free (current_ir_graph->obst, res);
1338 res = optimize_node (res);
1339 IRN_VRFY_IRG(res, irg);
1342 /* return the pointer to the Phi node. This node might be deallocated! */
1347 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
/** This function computes the predecessors for the real Phi node, and then
    allocates and returns this node. The routine called to allocate the
    node might optimize it away and return a real value, or even a pointer
    to a deallocated Phi node on top of the obstack!
    This function is called with an in-array of proper size. **/
1355 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1357 ir_node *prevBlock, *res;
1360 /* This loop goes to all predecessor blocks of the block the Phi node is in
1361 and there finds the operands of the Phi node by calling
1362 get_r_value_internal. */
1363 for (i = 1; i <= ins; ++i) {
1364 assert (block->in[i]);
1365 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1367 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1370 /* After collecting all predecessors into the array nin a new Phi node
1371 with these predecessors is created. This constructor contains an
1372 optimization: If all predecessors of the Phi node are identical it
1373 returns the only operand instead of a new Phi node. If the value
1374 passes two different control flow edges without being defined, and
1375 this is the second path treated, a pointer to the node that will be
1376 allocated for the first path (recursion) is returned. We already
1377 know the address of this node, as it is the next node to be allocated
1378 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1379 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables. Attention: this might be a pointer to
     a node that will be allocated later! See new_rd_Phi_in.
     If this is called in mature, after some set_value in the same block,
     the proper value must not be overwritten:
       get_value       (makes Phi0, puts it into graph_arr)
       set_value       (overwrites Phi0 in graph_arr)
       mature_immBlock (upgrades Phi0, puts it again into graph_arr,
                        overwriting the proper value.) */
1392 if (!block->attr.block.graph_arr[pos]) {
1393 block->attr.block.graph_arr[pos] = res;
1395 /* printf(" value already computed by %s\n",
1396 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1402 /* This function returns the last definition of a variable. In case
1403 this variable was last defined in a previous block, Phi nodes are
1404 inserted. If the part of the firm graph containing the definition
1405 is not yet constructed, a dummy Phi node is returned. */
1407 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time. We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned. This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time. We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time. A Phi node needs
        to be created (phi_merge). If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it is optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time. Now two
        subcases are possible:
        * The value was computed completely the last time we were here. This
          is the case if there is no loop. We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet. We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case. Straightforwardly we would create a Phi before
          starting the computation of its predecessors. In this case we would
          find a Phi here in any case. The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi. @@@
        There is no simple check for the second subcase. Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyway, the basic situation is the same: we reached a block
        on two paths without finding a definition of the value: no Phi
        nodes are needed on either path.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example. */
1454 /* case 4 -- already visited. */
1455 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1457 /* visited the first time */
1458 set_irn_visited(block, get_irg_visited(current_ir_graph));
1460 /* Get the local valid value */
1461 res = block->attr.block.graph_arr[pos];
1463 /* case 2 -- If the value is actually computed, return it. */
1464 if (res) return res;
1466 if (block->attr.block.matured) { /* case 3 */
    /* The Phi has the same number of ins as the corresponding block. */
1469 int ins = get_irn_arity(block);
1471 NEW_ARR_A (ir_node *, nin, ins);
1473 /* Phi merge collects the predecessors and then creates a node. */
1474 res = phi_merge (block, pos, mode, nin, ins);
1476 } else { /* case 1 */
    /* The block is not mature, we don't know how many ins are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by
       mature_immBlock later.
       The Phi0 has to remember the pos of its internal value. If the real
       Phi is computed, pos is used to update the array with the local
       value. */
1486 res = new_rd_Phi0 (current_ir_graph, block, mode);
1487 res->attr.phi0_pos = pos;
1488 res->link = block->link;
1492 /* If we get here, the frontend missed a use-before-definition error */
1495 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1496 assert (mode->code >= irm_F && mode->code <= irm_P);
1497 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1498 tarval_mode_null[mode->code]);
1501 /* The local valid value is available now. */
1502 block->attr.block.graph_arr[pos] = res;
1510 it starts the recursion. This causes an Id at the entry of
1511 every block that has no definition of the value! **/
1513 #if USE_EXPLICIT_PHI_IN_STACK
1515 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1516 void free_Phi_in_stack(Phi_in_stack *s) { }
1519 static INLINE ir_node *
1520 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1521 ir_node **in, int ins, ir_node *phi0)
1524 ir_node *res, *known;
  /* Allocate a new node on the obstack. The allocation copies the in
     array. */
1528 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1529 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1531 /* This loop checks whether the Phi has more than one predecessor.
1532 If so, it is a real Phi node and we break the loop. Else the
1533 Phi node merges the same definition on several paths and therefore
1534 is not needed. Don't consider Bad nodes! */
  for (i = 0;  i < ins;  ++i) {
    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis: We can't detect them yet properly, as
       they still refer to the Phi0 they will replace. So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i] == res || in[i] == known || is_Bad(in[i])) continue;

    if (known == res)
      known = in[i];
    else
      break;
  }
1554 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1557 obstack_free (current_ir_graph->obst, res);
1558 if (is_Phi(known)) {
    /* If pred is a Phi node we want to optimize it: if loops are matured in a
       bad order, an enclosing Phi node may become superfluous. */
1561 res = optimize_in_place_2(known);
1562 if (res != known) { exchange(known, res); }
    /* An undefined value, e.g., in unreachable code. */
1571 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1572 IRN_VRFY_IRG(res, irg);
1573 /* Memory Phis in endless loops must be kept alive.
1574 As we can't distinguish these easily we keep all of them alive. */
1575 if ((res->op == op_Phi) && (mode == mode_M))
1576 add_End_keepalive(irg->end, res);
1583 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1585 #if PRECISE_EXC_CONTEXT
1587 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Construct a new frag_array for node n.
   Copy the content from the current graph_arr of the corresponding block:
   this is the current state.
   Set ProjM(n) as the current memory state.
   Furthermore, the last entry in the frag_arr of the current block points
   to n. This constructs a chain block->last_frag_op-> ... ->first_frag_op
   of all frag ops in the block. */
1596 static INLINE ir_node ** new_frag_arr (ir_node *n)
1601 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1602 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1603 sizeof(ir_node *)*current_ir_graph->n_loc);
  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet. */
1607 opt = get_opt_optimize(); set_optimize(0);
1608 /* Here we rely on the fact that all frag ops have Memory as first result! */
1609 if (get_irn_op(n) == op_Call)
1610 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1612 assert((pn_Quot_M == pn_DivMod_M) &&
1613 (pn_Quot_M == pn_Div_M) &&
1614 (pn_Quot_M == pn_Mod_M) &&
1615 (pn_Quot_M == pn_Load_M) &&
1616 (pn_Quot_M == pn_Store_M) &&
1617 (pn_Quot_M == pn_Alloc_M) );
1618 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1622 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/** returns the frag_arr from a node */
1629 static INLINE ir_node **
1630 get_frag_arr (ir_node *n) {
1631 switch (get_irn_opcode(n)) {
1633 return n->attr.call.exc.frag_arr;
1635 return n->attr.a.exc.frag_arr;
1637 return n->attr.load.exc.frag_arr;
1639 return n->attr.store.exc.frag_arr;
1641 return n->attr.except.frag_arr;
1646 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1648 if (!frag_arr[pos]) frag_arr[pos] = val;
1649 if (frag_arr[current_ir_graph->n_loc - 1]) {
1650 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1651 assert(arr != frag_arr && "Endless recursion detected");
1652 set_frag_value(arr, pos, val);
1657 for (i = 0; i < 1000; ++i) {
1658 if (!frag_arr[pos]) {
1659 frag_arr[pos] = val;
1661 if (frag_arr[current_ir_graph->n_loc - 1]) {
1662 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1668 assert(0 && "potential endless recursion");
1673 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1677 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1679 frag_arr = get_frag_arr(cfOp);
1680 res = frag_arr[pos];
1682 if (block->attr.block.graph_arr[pos]) {
1683 /* There was a set_value after the cfOp and no get_value before that
1684 set_value. We must build a Phi node now. */
1685 if (block->attr.block.matured) {
1686 int ins = get_irn_arity(block);
1688 NEW_ARR_A (ir_node *, nin, ins);
1689 res = phi_merge(block, pos, mode, nin, ins);
1691 res = new_rd_Phi0 (current_ir_graph, block, mode);
1692 res->attr.phi0_pos = pos;
1693 res->link = block->link;
1697 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1698 but this should be better: (remove comment if this works) */
1699 /* It's a Phi, we can write this into all graph_arrs with NULL */
1700 set_frag_value(block->attr.block.graph_arr, pos, res);
1702 res = get_r_value_internal(block, pos, mode);
1703 set_frag_value(block->attr.block.graph_arr, pos, res);
/** This function computes the predecessors for the real Phi node, and then
1712 allocates and returns this node. The routine called to allocate the
1713 node might optimize it away and return a real value.
1714 This function must be called with an in-array of proper size. **/
1716 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1718 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr, as a later value is already
     remembered there. */
1725 if (!block->attr.block.graph_arr[pos]) {
1726 if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is not a good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language. Unfortunately, the only help we
         can give here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block if a Cond has been replaced by a tuple
         (bad, jmp). In this case we call the function needlessly,
         eventually generating a non-existent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are
         intercepted before recursion. */
1739 if (default_initialize_local_variable)
1740 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1742 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1743 /* We don't need to care about exception ops in the start block.
1744 There are none by definition. */
1745 return block->attr.block.graph_arr[pos];
1747 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1748 block->attr.block.graph_arr[pos] = phi0;
1749 #if PRECISE_EXC_CONTEXT
1750 if (get_opt_precise_exc_context()) {
      /* Set graph_arr for fragile ops. Also here we should break the
         recursion. We could choose a cyclic path through a cfop. But the
         recursion would break at some point. */
1754 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1760 /* This loop goes to all predecessor blocks of the block the Phi node
1761 is in and there finds the operands of the Phi node by calling
1762 get_r_value_internal. */
1763 for (i = 1; i <= ins; ++i) {
1764 prevCfOp = skip_Proj(block->in[i]);
1766 if (is_Bad(prevCfOp)) {
1767 /* In case a Cond has been optimized we would get right to the start block
1768 with an invalid definition. */
1769 nin[i-1] = new_Bad();
1772 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1774 if (!is_Bad(prevBlock)) {
1775 #if PRECISE_EXC_CONTEXT
1776 if (get_opt_precise_exc_context() &&
1777 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1778 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1779 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1782 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1784 nin[i-1] = new_Bad();
1788 /* We want to pass the Phi0 node to the constructor: this finds additional
1789 optimization possibilities.
1790 The Phi0 node either is allocated in this function, or it comes from
1791 a former call to get_r_value_internal. In this case we may not yet
1792 exchange phi0, as this is done in mature_immBlock. */
1794 phi0_all = block->attr.block.graph_arr[pos];
1795 if (!((get_irn_op(phi0_all) == op_Phi) &&
1796 (get_irn_arity(phi0_all) == 0) &&
1797 (get_nodes_block(phi0_all) == block)))
1803 /* After collecting all predecessors into the array nin a new Phi node
1804 with these predecessors is created. This constructor contains an
1805 optimization: If all predecessors of the Phi node are identical it
1806 returns the only operand instead of a new Phi node. */
1807 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1809 /* In case we allocated a Phi0 node at the beginning of this procedure,
1810 we need to exchange this Phi0 with the real Phi. */
1812 exchange(phi0, res);
1813 block->attr.block.graph_arr[pos] = res;
    /* We do not call set_frag_value, as it does not overwrite. That doesn't
       matter; it is only an optimization. */
1821 /* This function returns the last definition of a variable. In case
1822 this variable was last defined in a previous block, Phi nodes are
1823 inserted. If the part of the firm graph containing the definition
1824 is not yet constructed, a dummy Phi node is returned. */
1826 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time. We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned. This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time. We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time. A Phi node needs
        to be created (phi_merge). If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it is optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time. Now two
        subcases are possible:
        * The value was computed completely the last time we were here. This
          is the case if there is no loop. We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet. We are computing a value in a loop and need to
          break the recursion. This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0. */
1858 /* case 4 -- already visited. */
1859 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined. Here
       is the critical difference between the two algorithms. */
1862 assert(block->attr.block.graph_arr[pos]);
1863 return block->attr.block.graph_arr[pos];
1866 /* visited the first time */
1867 set_irn_visited(block, get_irg_visited(current_ir_graph));
1869 /* Get the local valid value */
1870 res = block->attr.block.graph_arr[pos];
1872 /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;
1875 if (block->attr.block.matured) { /* case 3 */
    /* The Phi has the same number of ins as the corresponding block. */
1878 int ins = get_irn_arity(block);
1880 NEW_ARR_A (ir_node *, nin, ins);
1882 /* Phi merge collects the predecessors and then creates a node. */
1883 res = phi_merge (block, pos, mode, nin, ins);
1885 } else { /* case 1 */
    /* The block is not mature, we don't know how many ins are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of its internal value. If the real
       Phi is computed, pos is used to update the array with the local
       value. */
1893 res = new_rd_Phi0 (current_ir_graph, block, mode);
1894 res->attr.phi0_pos = pos;
1895 res->link = block->link;
1899 /* If we get here, the frontend missed a use-before-definition error */
1902 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1903 assert (mode->code >= irm_F && mode->code <= irm_P);
1904 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1905 get_mode_null(mode));
1908 /* The local valid value is available now. */
1909 block->attr.block.graph_arr[pos] = res;
1914 #endif /* USE_FAST_PHI_CONSTRUCTION */
1916 /* ************************************************************************** */
1918 /** Finalize a Block node, when all control flows are known. */
1919 /** Acceptable parameters are only Block nodes. */
1921 mature_immBlock (ir_node *block)
1928 assert (get_irn_opcode(block) == iro_Block);
1929 /* @@@ should be commented in
1930 assert (!get_Block_matured(block) && "Block already matured"); */
1932 if (!get_Block_matured(block)) {
1933 ins = ARR_LEN (block->in)-1;
1934 /* Fix block parameters */
1935 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1937 /* An array for building the Phi nodes. */
1938 NEW_ARR_A (ir_node *, nin, ins);
    /* Traverse a chain of Phi nodes attached to this block and mature
       these, too. */
1942 for (n = block->link; n; n=next) {
1943 inc_irg_visited(current_ir_graph);
1945 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1948 block->attr.block.matured = 1;
    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack. Therefore we have to call
       optimize_in_place().
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call _2, as global cse has no effect on blocks. */
1957 block = optimize_in_place_2(block);
1958 IRN_VRFY_IRG(block, current_ir_graph);
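
/* Usage sketch (editor's illustration): the immature-block workflow that
   drives the Phi machinery above. new_immBlock(), add_immBlock_pred(),
   set_value() and get_value() are the companion functions assumed to be
   declared in ircons.h; jmp1/jmp2 are assumptions.

     ir_node *bl = new_immBlock();        /* arity still unknown            */
     add_immBlock_pred(bl, jmp1);         /* predecessors arrive one by one */
     add_immBlock_pred(bl, jmp2);
     ir_node *v = get_value(0, mode_Is);  /* may yield a Phi0 placeholder   */
     mature_immBlock(bl);                 /* fixes arity, upgrades Phi0s    */
*/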
1963 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1965 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1970 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1972 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1977 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1979 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1985 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1987 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1992 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1994 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
1999 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2002 assert(arg->op == op_Cond);
2003 arg->attr.c.kind = fragmentary;
2004 arg->attr.c.default_proj = max_proj;
2005 res = new_Proj (arg, mode_X, max_proj);
2010 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2012 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2019 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2023 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2025 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2030 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2032 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2037 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2039 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2045 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2047 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2052 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2054 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
/** allocate the frag array */
2061 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2062 if (get_opt_precise_exc_context()) {
2063 if ((current_ir_graph->phase_state == phase_building) &&
2064 (get_irn_op(res) == op) && /* Could be optimized away. */
        !*frag_store) {  /* Could be a cse where the arr is already set. */
2066 *frag_store = new_frag_arr(res);
2073 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2076 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2078 res->attr.except.pin_state = op_pin_state_pinned;
2079 #if PRECISE_EXC_CONTEXT
2080 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2087 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2090 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2092 res->attr.except.pin_state = op_pin_state_pinned;
2093 #if PRECISE_EXC_CONTEXT
2094 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2101 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2104 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2106 res->attr.except.pin_state = op_pin_state_pinned;
2107 #if PRECISE_EXC_CONTEXT
2108 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2115 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2118 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2120 res->attr.except.pin_state = op_pin_state_pinned;
2121 #if PRECISE_EXC_CONTEXT
2122 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
ir_node *new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
}

ir_node *new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or(db, current_ir_graph, current_ir_graph->current_block,
                   op1, op2, mode);
}

ir_node *new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
}

ir_node *new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Not(db, current_ir_graph, current_ir_graph->current_block,
                    op, mode);
}

ir_node *new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl(db, current_ir_graph, current_ir_graph->current_block,
                    op, k, mode);
}

ir_node *new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr(db, current_ir_graph, current_ir_graph->current_block,
                    op, k, mode);
}

ir_node *new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs(db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot(db, current_ir_graph, current_ir_graph->current_block,
                    op, k, mode);
}

ir_node *new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Abs(db, current_ir_graph, current_ir_graph->current_block,
                    op, mode);
}

ir_node *new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2) {
  return new_rd_Cmp(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2);
}

ir_node *new_d_Jmp (dbg_info* db) {
  return new_rd_Jmp(db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *new_d_Cond (dbg_info* db, ir_node *c) {
  return new_rd_Cond(db, current_ir_graph, current_ir_graph->current_block, c);
}

ir_node *new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
                     type *tp) {
  ir_node *res = new_rd_Call(db, current_ir_graph, current_ir_graph->current_block,
                             store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

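/* A minimal usage sketch (not part of the library): calling a function
   through its entity and extracting memory and the first result.  The
   symconst_symbol member entity_p and the Proj numbers pn_Call_M_regular/
   pn_Call_T_result are assumed from the library headers; callee_ent, mtp
   and the result mode mode_Is are assumptions of a hypothetical front end. */
#if 0
static ir_node *build_call_example(entity *callee_ent, type *mtp, ir_node *arg0)
{
  symconst_symbol sym;
  ir_node *callee, *call, *ress, *in[1];

  sym.entity_p = callee_ent;
  callee = new_SymConst(sym, symconst_addr_ent);  /* address of the callee  */
  in[0]  = arg0;
  call   = new_Call(get_store(), callee, 1, in, mtp);
  set_store(new_Proj(call, mode_M, pn_Call_M_regular));  /* memory result   */
  ress   = new_Proj(call, mode_T, pn_Call_T_result);     /* result tuple    */
  return new_Proj(ress, mode_Is, 0);                     /* first result    */
}
#endif
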
ir_node *new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in) {
  return new_rd_Return(db, current_ir_graph, current_ir_graph->current_block,
                       store, arity, in);
}

ir_node *new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj) {
  return new_rd_Raise(db, current_ir_graph, current_ir_graph->current_block,
                      store, obj);
}

ir_node *new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode) {
  ir_node *res = new_rd_Load(db, current_ir_graph, current_ir_graph->current_block,
                             store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) {
  ir_node *res = new_rd_Store(db, current_ir_graph, current_ir_graph->current_block,
                              store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
                      where_alloc where) {
  ir_node *res = new_rd_Alloc(db, current_ir_graph, current_ir_graph->current_block,
                              store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
                     ir_node *size, type *free_type, where_alloc where) {
  return new_rd_Free(db, current_ir_graph, current_ir_graph->current_block,
                     store, ptr, size, free_type, where);
}

ir_node *new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  return new_rd_Sel(db, current_ir_graph, current_ir_graph->current_block,
                    store, objptr, 0, NULL, ent);
}

ir_node *new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel) {
  return new_rd_Sel(db, current_ir_graph, current_ir_graph->current_block,
                    store, objptr, n_index, index, sel);
}

ir_node *new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent) {
  return new_rd_InstOf(db, current_ir_graph, current_ir_graph->current_block,
                       store, objptr, ent);
}

ir_node *new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp) {
  return new_rd_SymConst_type(db, current_ir_graph, current_ir_graph->start_block,
                              value, kind, tp);
}

ir_node *new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind) {
  return new_rd_SymConst(db, current_ir_graph, current_ir_graph->start_block,
                         value, kind);
}

ir_node *new_d_Sync (dbg_info* db, int arity, ir_node** in) {
  return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block,
                     arity, in);
}

/* The parentheses around the name suppress the function-like macro of the
   same name, following the _new_d_Bad inline pattern. */
ir_node *(new_d_Bad)(void) {
  return _new_d_Bad();
}

ir_node *new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm(db, current_ir_graph, current_ir_graph->current_block,
                        val, bound, cmp);
}

ir_node *new_d_Unknown (ir_mode *m) {
  return new_rd_Unknown(current_ir_graph, m);
}

ir_node *new_d_CallBegin (dbg_info *db, ir_node *call) {
  return new_rd_CallBegin(db, current_ir_graph, current_ir_graph->current_block, call);
}

ir_node *new_d_EndReg (dbg_info *db) {
  return new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *new_d_EndExcept (dbg_info *db) {
  return new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *new_d_Break (dbg_info *db) {
  return new_rd_Break(db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_rd_Filter(db, current_ir_graph, current_ir_graph->current_block,
                       arg, mode, proj);
}

ir_node *(new_d_NoMem)(void) {
  return _new_d_NoMem();
}

ir_node *new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
                    ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux(db, current_ir_graph, current_ir_graph->current_block,
                    sel, ir_false, ir_true, mode);
}

/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Uses also constructors of ?? interface, except new_Block.)           */
/* ********************************************************************* */

/** Block construction **/
/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
  ir_node *res;

  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  /* creates a new dynamic in-array: an arity of -1 requests a growing array */
  res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  res->attr.block.dead        = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize the array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
                                        current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *) * current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}

ir_node *new_immBlock (void) {
  return new_d_immBlock(NULL);
}

/* add an edge from a jmp/control flow node to an immature block */
void add_immBlock_pred (ir_node *block, ir_node *jmp) {
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}

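/* A minimal usage sketch (not part of the library): building the join block
   of an if-statement with immature blocks.  mature_immBlock is the maturing
   routine defined earlier in this file; cond_jmp_true/cond_jmp_false stand
   for the X-Projs of a Cond and are assumptions for illustration. */
#if 0
static void build_join_example(ir_node *cond_jmp_true, ir_node *cond_jmp_false)
{
  ir_node *join = new_immBlock();           /* join block, preds still open  */
  add_immBlock_pred(join, cond_jmp_true);   /* edge from the then-branch     */
  add_immBlock_pred(join, cond_jmp_false);  /* edge from the else-branch     */
  mature_immBlock(join);                    /* fix the arity; Phi nodes for
                                               values live across the branches
                                               are completed automatically   */
}
#endif
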
/* changing the current block */
void set_cur_block (ir_node *target) {
  current_ir_graph->current_block = target;
}

/* ************************ */
/* parameter administration */

/* get a value from the parameter array of the current block by its position */
ir_node *get_d_value (dbg_info* db, int pos, ir_mode *mode) {
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);  /* get_r_value_internal uses the visited flags */

  return get_r_value_internal(current_ir_graph->current_block, pos + 1, mode);
}

/* get a value from the parameter array of the current block by its position */
ir_node *get_value (int pos, ir_mode *mode) {
  return get_d_value(NULL, pos, mode);
}

/* set a value at position pos in the parameter array of the current block */
void set_value (int pos, ir_node *value) {
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  assert(pos + 1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}

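/* A minimal usage sketch (not part of the library): SSA construction for the
   statement sequence "x = a + b; y = x".  The variable numbering (a=0, b=1,
   x=2, y=3) is an assumption of a hypothetical front end; slot 0 of the
   internal array is reserved for the store, hence the pos+1 above. */
#if 0
static void build_locals_example(void)
{
  ir_node *a = get_value(0, mode_Is);
  ir_node *b = get_value(1, mode_Is);
  set_value(2, new_Add(a, b, mode_Is));  /* x = a + b */
  set_value(3, get_value(2, mode_Is));   /* y = x; Phi nodes appear
                                            automatically when values flow
                                            across block boundaries */
}
#endif
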
/* get the current store */
ir_node *get_store (void) {
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal(current_ir_graph->current_block, 0, mode_M);
}

/* set the current store */
void set_store (ir_node *store) {
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}

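/* A minimal usage sketch (not part of the library): threading the memory
   state through a Load followed by a Store.  The Proj numbers pn_Load_M,
   pn_Load_res and pn_Store_M are assumed from irnode.h; the helper name
   and mode_Is are assumptions for illustration. */
#if 0
static ir_node *build_load_store_example(ir_node *addr1, ir_node *addr2)
{
  ir_node *ld, *st, *val;

  ld = new_Load(get_store(), addr1, mode_Is);
  set_store(new_Proj(ld, mode_M, pn_Load_M));   /* memory after the Load  */
  val = new_Proj(ld, mode_Is, pn_Load_res);     /* the loaded value       */
  st = new_Store(get_store(), addr2, val);
  set_store(new_Proj(st, mode_M, pn_Store_M));  /* memory after the Store */
  return val;
}
#endif
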
/* keeps a node alive by adding it to the keep-alive set of the End node */
void keep_alive (ir_node *ka) {
  add_End_keepalive(current_ir_graph->end, ka);
}

/** Useful access routines **/

/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block(void) {
  return get_irg_current_block(current_ir_graph);
}

/* Returns the frame type of the current graph */
type *get_cur_frame_type(void) {
  return get_irg_frame_type(current_ir_graph);
}

/* ********************************************************************* */
/* initialize and finalize IR construction                               */
/* ********************************************************************* */

/* call once for each run of the library */
void init_cons(uninitialized_local_variable_func_t *func) {
  default_initialize_local_variable = func;
}

/* call for each graph */
void finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}

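/* A minimal usage sketch (not part of the library): the construction
   lifecycle of one graph.  my_entity, n_local_vars and my_init_local are
   assumptions of a hypothetical front end; new_ir_graph, get_irg_end_block
   and mature_immBlock are library routines declared elsewhere. */
#if 0
static void build_graph_example(entity *my_entity, int n_local_vars,
                                uninitialized_local_variable_func_t *my_init_local)
{
  ir_graph *irg;

  init_cons(my_init_local);                     /* once per run of the library */
  irg = new_ir_graph(my_entity, n_local_vars);  /* sets current_ir_graph       */
  /* ... build blocks and nodes with the constructors above ... */
  mature_immBlock(get_irg_end_block(irg));
  finalize_cons(irg);                           /* graph leaves phase_building */
}
#endif
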
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_Const_type(tarval *con, type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf(NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm(NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem (void) {
  return new_d_NoMem();
}
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
}