3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verification */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language-dependent initialization callback for local variables
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
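/* Illustration (not part of the original file): a frontend can supply a
   callback of this kind so that reads of uninitialized local variables get a
   language-specific default instead of tarval_bad.  A minimal sketch, assuming
   the callback takes the mode and the variable position, as it is invoked in
   phi_merge below; my_default_init_local is a made-up name, and the callback
   is registered through the construction interface's init routine, which is
   not part of this excerpt:

       static ir_node *my_default_init_local(ir_mode *mode, int pos) {
         (void) pos;
         return new_Const(mode, get_mode_null(mode));
       }
*/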
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
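/* Usage sketch (illustration only, the names are placeholders): once both
   control flow predecessors of a join block are known, it can be built
   directly, without the immature block machinery:

       ir_node *preds[2] = { jmp_from_then, jmp_from_else };
       ir_node *join     = new_rd_Block(NULL, irg, 2, preds);
*/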
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
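/* Usage sketch (illustration only, the names are placeholders): for a mature
   join block with two control flow predecessors the Phi gets one data
   predecessor per control flow predecessor, in the same order:

       ir_node *vals[2] = { val_from_then, val_from_else };
       ir_node *phi     = new_rd_Phi(NULL, irg, join, 2, vals, mode_Is);

   mode_Is stands here for whatever data mode the value has. */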
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 return new_rd_Const_type (db, irg, block, mode, con, tp);
162 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
166 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
167 res = optimize_node(res);
168 IRN_VRFY_IRG(res, irg);
173 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
178 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
179 res->attr.proj = proj;
182 assert(get_Proj_pred(res));
183 assert(get_nodes_block(get_Proj_pred(res)));
185 res = optimize_node(res);
187 IRN_VRFY_IRG(res, irg);
193 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
197 assert(arg->op == op_Cond);
198 arg->attr.c.kind = fragmentary;
199 arg->attr.c.default_proj = max_proj;
200 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
205 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
210 res = optimize_node(res);
211 IRN_VRFY_IRG(res, irg);
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
220 assert(is_atomic_type(to_tp));
222 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
223 res->attr.cast.totype = to_tp;
224 res = optimize_node(res);
225 IRN_VRFY_IRG(res, irg);
230 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
234 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
235 res = optimize_node (res);
236 IRN_VRFY_IRG(res, irg);
241 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
242 ir_node *op1, ir_node *op2, ir_mode *mode)
249 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
250 res = optimize_node(res);
251 IRN_VRFY_IRG(res, irg);
256 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *op1, ir_node *op2, ir_mode *mode)
264 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op, ir_mode *mode)
276 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
277 res = optimize_node(res);
278 IRN_VRFY_IRG(res, irg);
283 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
284 ir_node *op1, ir_node *op2, ir_mode *mode)
291 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
292 res = optimize_node(res);
293 IRN_VRFY_IRG(res, irg);
298 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
299 ir_node *memop, ir_node *op1, ir_node *op2)
307 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
308 res = optimize_node(res);
309 IRN_VRFY_IRG(res, irg);
314 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
315 ir_node *memop, ir_node *op1, ir_node *op2)
323 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
324 res = optimize_node(res);
325 IRN_VRFY_IRG(res, irg);
330 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
331 ir_node *memop, ir_node *op1, ir_node *op2)
339 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
340 res = optimize_node(res);
341 IRN_VRFY_IRG(res, irg);
346 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *memop, ir_node *op1, ir_node *op2)
355 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
356 res = optimize_node(res);
357 IRN_VRFY_IRG(res, irg);
362 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
363 ir_node *op1, ir_node *op2, ir_mode *mode)
370 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
371 res = optimize_node(res);
372 IRN_VRFY_IRG(res, irg);
377 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
378 ir_node *op1, ir_node *op2, ir_mode *mode)
385 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
386 res = optimize_node(res);
387 IRN_VRFY_IRG(res, irg);
392 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
393 ir_node *op1, ir_node *op2, ir_mode *mode)
400 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
401 res = optimize_node (res);
402 IRN_VRFY_IRG(res, irg);
407 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
408 ir_node *op, ir_mode *mode)
412 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
413 res = optimize_node(res);
414 IRN_VRFY_IRG(res, irg);
419 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
420 ir_node *op, ir_node *k, ir_mode *mode)
427 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
434 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
442 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
450 ir_node *op, ir_node *k, ir_mode *mode)
457 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
458 res = optimize_node(res);
459 IRN_VRFY_IRG(res, irg);
464 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *op, ir_node *k, ir_mode *mode)
472 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
480 ir_node *op, ir_mode *mode)
484 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
485 res = optimize_node (res);
486 IRN_VRFY_IRG(res, irg);
491 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
492 ir_node *op1, ir_node *op2)
499 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
500 res = optimize_node(res);
501 IRN_VRFY_IRG(res, irg);
506 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
510 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
511 res = optimize_node (res);
512 IRN_VRFY_IRG (res, irg);
517 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
521 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
522 res->attr.c.kind = dense;
523 res->attr.c.default_proj = 0;
524 res = optimize_node (res);
525 IRN_VRFY_IRG(res, irg);
530 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
531 ir_node *callee, int arity, ir_node **in, type *tp)
538 NEW_ARR_A(ir_node *, r_in, r_arity);
541 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
543 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
545 assert((get_unknown_type() == tp) || is_method_type(tp));
546 set_Call_type(res, tp);
547 res->attr.call.exc.pin_state = op_pin_state_pinned;
548 res->attr.call.callee_arr = NULL;
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
555 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
556 ir_node *store, int arity, ir_node **in)
563 NEW_ARR_A (ir_node *, r_in, r_arity);
565 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
566 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
573 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
580 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
581 res = optimize_node(res);
582 IRN_VRFY_IRG(res, irg);
587 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
588 ir_node *store, ir_node *adr, ir_mode *mode)
595 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
596 res->attr.load.exc.pin_state = op_pin_state_pinned;
597 res->attr.load.load_mode = mode;
598 res->attr.load.volatility = volatility_non_volatile;
599 res = optimize_node(res);
600 IRN_VRFY_IRG(res, irg);
605 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
606 ir_node *store, ir_node *adr, ir_node *val)
614 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
615 res->attr.store.exc.pin_state = op_pin_state_pinned;
616 res->attr.store.volatility = volatility_non_volatile;
617 res = optimize_node(res);
618 IRN_VRFY_IRG(res, irg);
623 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
624 ir_node *size, type *alloc_type, where_alloc where)
631 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
632 res->attr.a.exc.pin_state = op_pin_state_pinned;
633 res->attr.a.where = where;
634 res->attr.a.type = alloc_type;
635 res = optimize_node(res);
636 IRN_VRFY_IRG(res, irg);
641 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
642 ir_node *ptr, ir_node *size, type *free_type)
650 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
651 res->attr.f = free_type;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
658 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
659 int arity, ir_node **in, entity *ent)
665 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
668 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
671 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
672 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
673 res->attr.s.ent = ent;
674 res = optimize_node(res);
675 IRN_VRFY_IRG(res, irg);
680 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
681 ir_node *objptr, type *ent)
688 NEW_ARR_A(ir_node *, r_in, r_arity);
692 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
693 res->attr.io.ent = ent;
695 /* res = optimize(res); */
696 IRN_VRFY_IRG(res, irg);
701 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
702 symconst_kind symkind, type *tp) {
706 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
711 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
713 res->attr.i.num = symkind;
714 res->attr.i.sym = value;
717 res = optimize_node(res);
718 IRN_VRFY_IRG(res, irg);
723 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
724 symconst_kind symkind)
726 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
730 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
731 symconst_symbol sym = {(type *)symbol};
732 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
735 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
736 symconst_symbol sym = {(type *)symbol};
737 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
740 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
741 symconst_symbol sym = {symbol};
742 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
745 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
746 symconst_symbol sym = {symbol};
747 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
751 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
755 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
756 res = optimize_node(res);
757 IRN_VRFY_IRG(res, irg);
762 new_rd_Bad (ir_graph *irg)
768 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
770 ir_node *in[2], *res;
774 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
775 res->attr.confirm_cmp = cmp;
776 res = optimize_node (res);
777 IRN_VRFY_IRG(res, irg);
782 new_rd_Unknown (ir_graph *irg, ir_mode *m)
784 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
788 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
793 in[0] = get_Call_ptr(call);
794 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
795 /* res->attr.callbegin.irg = irg; */
796 res->attr.callbegin.call = call;
797 res = optimize_node(res);
798 IRN_VRFY_IRG(res, irg);
803 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
807 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
809 IRN_VRFY_IRG(res, irg);
814 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
818 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
819 irg->end_except = res;
820 IRN_VRFY_IRG (res, irg);
825 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
829 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
830 res = optimize_node(res);
831 IRN_VRFY_IRG(res, irg);
836 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
841 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
842 res->attr.filter.proj = proj;
843 res->attr.filter.in_cg = NULL;
844 res->attr.filter.backedge = NULL;
847 assert(get_Proj_pred(res));
848 assert(get_nodes_block(get_Proj_pred(res)));
850 res = optimize_node(res);
851 IRN_VRFY_IRG(res, irg);
857 new_rd_NoMem (ir_graph *irg)
863 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
864 return new_rd_Block(NULL, irg, arity, in);
866 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
867 return new_rd_Start(NULL, irg, block);
869 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
870 return new_rd_End(NULL, irg, block);
872 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
873 return new_rd_Jmp(NULL, irg, block);
875 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
876 return new_rd_Cond(NULL, irg, block, c);
878 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
879 ir_node *store, int arity, ir_node **in) {
880 return new_rd_Return(NULL, irg, block, store, arity, in);
882 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
883 ir_node *store, ir_node *obj) {
884 return new_rd_Raise(NULL, irg, block, store, obj);
886 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
887 ir_mode *mode, tarval *con) {
888 return new_rd_Const(NULL, irg, block, mode, con);
890 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
891 symconst_symbol value, symconst_kind symkind) {
892 return new_rd_SymConst(NULL, irg, block, value, symkind);
894 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
895 ir_node *objptr, int n_index, ir_node **index,
897 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
899 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
901 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
903 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
904 ir_node *callee, int arity, ir_node **in,
906 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
908 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
909 ir_node *op1, ir_node *op2, ir_mode *mode) {
910 return new_rd_Add(NULL, irg, block, op1, op2, mode);
912 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
913 ir_node *op1, ir_node *op2, ir_mode *mode) {
914 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
916 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
917 ir_node *op, ir_mode *mode) {
918 return new_rd_Minus(NULL, irg, block, op, mode);
920 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
921 ir_node *op1, ir_node *op2, ir_mode *mode) {
922 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
924 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
925 ir_node *memop, ir_node *op1, ir_node *op2) {
926 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
928 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
929 ir_node *memop, ir_node *op1, ir_node *op2) {
930 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
932 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
933 ir_node *memop, ir_node *op1, ir_node *op2) {
934 return new_rd_Div(NULL, irg, block, memop, op1, op2);
936 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
937 ir_node *memop, ir_node *op1, ir_node *op2) {
938 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
940 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
941 ir_node *op, ir_mode *mode) {
942 return new_rd_Abs(NULL, irg, block, op, mode);
944 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
945 ir_node *op1, ir_node *op2, ir_mode *mode) {
946 return new_rd_And(NULL, irg, block, op1, op2, mode);
948 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
949 ir_node *op1, ir_node *op2, ir_mode *mode) {
950 return new_rd_Or(NULL, irg, block, op1, op2, mode);
952 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
953 ir_node *op1, ir_node *op2, ir_mode *mode) {
954 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
956 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_mode *mode) {
958 return new_rd_Not(NULL, irg, block, op, mode);
960 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
961 ir_node *op1, ir_node *op2) {
962 return new_rd_Cmp(NULL, irg, block, op1, op2);
964 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
965 ir_node *op, ir_node *k, ir_mode *mode) {
966 return new_rd_Shl(NULL, irg, block, op, k, mode);
968 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
969 ir_node *op, ir_node *k, ir_mode *mode) {
970 return new_rd_Shr(NULL, irg, block, op, k, mode);
972 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
973 ir_node *op, ir_node *k, ir_mode *mode) {
974 return new_rd_Shrs(NULL, irg, block, op, k, mode);
976 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
977 ir_node *op, ir_node *k, ir_mode *mode) {
978 return new_rd_Rot(NULL, irg, block, op, k, mode);
980 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
981 ir_node *op, ir_mode *mode) {
982 return new_rd_Conv(NULL, irg, block, op, mode);
984 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
985 return new_rd_Cast(NULL, irg, block, op, to_tp);
987 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
988 ir_node **in, ir_mode *mode) {
989 return new_rd_Phi(NULL, irg, block, arity, in, mode);
991 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
992 ir_node *store, ir_node *adr, ir_mode *mode) {
993 return new_rd_Load(NULL, irg, block, store, adr, mode);
995 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
996 ir_node *store, ir_node *adr, ir_node *val) {
997 return new_rd_Store(NULL, irg, block, store, adr, val);
999 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1000 ir_node *size, type *alloc_type, where_alloc where) {
1001 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1003 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1004 ir_node *ptr, ir_node *size, type *free_type) {
1005 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1007 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1008 return new_rd_Sync(NULL, irg, block, arity, in);
1010 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1011 ir_mode *mode, long proj) {
1012 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1014 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1016 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1018 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1019 int arity, ir_node **in) {
1020 return new_rd_Tuple(NULL, irg, block, arity, in );
1022 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1023 ir_node *val, ir_mode *mode) {
1024 return new_rd_Id(NULL, irg, block, val, mode);
1026 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1027 return new_rd_Bad(irg);
1029 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1030 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1032 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1033 return new_rd_Unknown(irg, m);
1035 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1036 return new_rd_CallBegin(NULL, irg, block, callee);
1038 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1039 return new_rd_EndReg(NULL, irg, block);
1041 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1042 return new_rd_EndExcept(NULL, irg, block);
1044 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1045 return new_rd_Break(NULL, irg, block);
1047 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1048 ir_mode *mode, long proj) {
1049 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1051 INLINE ir_node *new_r_NoMem (ir_graph *irg) {
1052 return new_rd_NoMem(irg);
1056 /** ********************/
1057 /** public interfaces */
1058 /** construction tools */
1062 * - create a new Start node in the current block
1064 * @return s - pointer to the created Start node
1069 new_d_Start (dbg_info* db)
1073 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1074 op_Start, mode_T, 0, NULL);
1075 /* res->attr.start.irg = current_ir_graph; */
1077 res = optimize_node(res);
1078 IRN_VRFY_IRG(res, current_ir_graph);
1083 new_d_End (dbg_info* db)
1086 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1087 op_End, mode_X, -1, NULL);
1088 res = optimize_node(res);
1089 IRN_VRFY_IRG(res, current_ir_graph);
1094 /* Constructs a Block with a fixed number of predecessors.
1095 Does set current_block. Can be used with automatic Phi
1096 node construction. */
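/* Usage sketch (illustration only; preds, pos, val and mode are placeholders):
   the new block becomes current_block, and afterwards the public
   get_value/set_value interface can be used; Phi nodes are inserted
   automatically where a value reaches the block from several predecessors:

       ir_node *b = new_d_Block(NULL, 2, preds);
       set_value(pos, val);
       ...
       ir_node *x = get_value(pos, mode);
*/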
1098 new_d_Block (dbg_info* db, int arity, ir_node **in)
1102 bool has_unknown = false;
1104 res = new_rd_Block(db, current_ir_graph, arity, in);
1106 /* Create and initialize array for Phi-node construction. */
1107 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1108 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1109 current_ir_graph->n_loc);
1110 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1113 for (i = arity-1; i >= 0; i--)
1114 if (get_irn_op(in[i]) == op_Unknown) {
1119 if (!has_unknown) res = optimize_node(res);
1120 current_ir_graph->current_block = res;
1122 IRN_VRFY_IRG(res, current_ir_graph);
1127 /* ***********************************************************************/
1128 /* Methods necessary for automatic Phi node creation */
1130 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1131 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1132 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1133 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1135 Call Graph: ( A ---> B == A "calls" B)
  get_value            --->  get_r_value_internal
  mature_immBlock      --->  phi_merge
  get_r_value_internal --->  phi_merge             (mutual recursion)
  phi_merge            --->  get_r_value_internal
  get_r_value_internal --->  new_rd_Phi0
  phi_merge            --->  new_rd_Phi0
  phi_merge            --->  new_rd_Phi_in
1151 * *************************************************************************** */
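/* Worked example for the call graph above (illustration only): for source code
   like  "if (c) x = 1; else x = 2; use(x);"  the get_value for x in the block
   containing use(x) ends up in get_r_value_internal; since that block has no
   local definition of x, phi_merge asks both predecessor blocks for their value
   (again via get_r_value_internal) and builds the Phi with new_rd_Phi_in.  If a
   block is not yet mature, a Phi0 placeholder from new_rd_Phi0 is recorded
   instead and completed later by mature_immBlock. */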
1153 /** Creates a Phi node with 0 predecessors */
1154 static INLINE ir_node *
1155 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1159 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1160 IRN_VRFY_IRG(res, irg);
1164 /* There are two implementations of the Phi node construction. The first
1165 is faster, but does not work for blocks with more than 2 predecessors.
1166 The second always works but is slower and causes more unnecessary Phi nodes.
1168 Select the implementation with the following preprocessor flag:
1170 #if USE_FAST_PHI_CONSTRUCTION
1172 /* This is a stack used for allocating and deallocating nodes in
1173 new_rd_Phi_in. The original implementation used the obstack
1174 to model this stack, now it is explicit. This reduces side effects.
1176 #if USE_EXPLICIT_PHI_IN_STACK
1177 INLINE Phi_in_stack *
1178 new_Phi_in_stack(void) {
1181 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1183 res->stack = NEW_ARR_F (ir_node *, 0);
1190 free_Phi_in_stack(Phi_in_stack *s) {
1191 DEL_ARR_F(s->stack);
1195 free_to_Phi_in_stack(ir_node *phi) {
1196 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1197 current_ir_graph->Phi_in_stack->pos)
1198 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1200 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1202 (current_ir_graph->Phi_in_stack->pos)++;
1205 static INLINE ir_node *
1206 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1207 int arity, ir_node **in) {
1209 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1210 int pos = current_ir_graph->Phi_in_stack->pos;
1214 /* We need to allocate a new node */
1215 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1216 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1218 /* reuse the old node and initialize it again. */
1221 assert (res->kind == k_ir_node);
1222 assert (res->op == op_Phi);
1226 assert (arity >= 0);
1227 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1228 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1230 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1232 (current_ir_graph->Phi_in_stack->pos)--;
1236 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1238 /* Creates a Phi node with a given, fixed array **in of predecessors.
1239 If the Phi node is unnecessary, as the same value reaches the block
1240 through all control flow paths, it is eliminated and the value
1241 returned directly. This constructor is only intended for use in
1242 the automatic Phi node generation triggered by get_value or mature.
1243 The implementation is quite tricky and depends on the fact, that
1244 the nodes are allocated on a stack:
1245 The in array contains predecessors and NULLs. The NULLs appear
1246 if get_r_value_internal, which computed the predecessors, reached
1247 the same block on two paths. In this case the same value reaches
1248 this block on both paths and there is no definition in between. We need
1249 not allocate a Phi where these paths merge, but we have to communicate
1250 this fact to the caller. This happens by returning a pointer to the
1251 node the caller _will_ allocate. (Yes, we predict the address. We can
1252 do so because the nodes are allocated on the obstack.) The caller then
1253 finds a pointer to itself and, when this routine is called again,
1256 static INLINE ir_node *
1257 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1260 ir_node *res, *known;
1262 /* Allocate a new node on the obstack. This can return a node to
1263 which some of the pointers in the in-array already point.
1264 Attention: the constructor copies the in array, i.e., the later
1265 changes to the array in this routine do not affect the
1266 constructed node! If the in array contains NULLs, there will be
1267 missing predecessors in the returned node. Is this a possible
1268 internal state of the Phi node generation? */
1269 #if USE_EXPLICIT_PHI_IN_STACK
1270 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1272 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1273 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1276 /* The in-array can contain NULLs. These were returned by
1277 get_r_value_internal if it reached the same block/definition on a
1278 second path. The NULLs are replaced by the node itself to
1279 simplify the test in the next loop. */
1280 for (i = 0; i < ins; ++i) {
1285 /* This loop checks whether the Phi has more than one predecessor.
1286 If so, it is a real Phi node and we break the loop. Else the Phi
1287 node merges the same definition on several paths and therefore is
1289 for (i = 0; i < ins; ++i)
1291 if (in[i] == res || in[i] == known) continue;
1299 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1301 #if USE_EXPLICIT_PHI_IN_STACK
1302 free_to_Phi_in_stack(res);
1304 obstack_free (current_ir_graph->obst, res);
1308 res = optimize_node (res);
1309 IRN_VRFY_IRG(res, irg);
1312 /* return the pointer to the Phi node. This node might be deallocated! */
1317 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1320 allocates and returns this node. The routine called to allocate the
1321 node might optimize it away and return a real value, or even a pointer
1322 to a deallocated Phi node on top of the obstack!
1323 This function is called with an in-array of proper size. **/
1325 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1327 ir_node *prevBlock, *res;
1330 /* This loop goes to all predecessor blocks of the block the Phi node is in
1331 and there finds the operands of the Phi node by calling
1332 get_r_value_internal. */
1333 for (i = 1; i <= ins; ++i) {
1334 assert (block->in[i]);
1335 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1337 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1340 /* After collecting all predecessors into the array nin a new Phi node
1341 with these predecessors is created. This constructor contains an
1342 optimization: If all predecessors of the Phi node are identical it
1343 returns the only operand instead of a new Phi node. If the value
1344 passes two different control flow edges without being defined, and
1345 this is the second path treated, a pointer to the node that will be
1346 allocated for the first path (recursion) is returned. We already
1347 know the address of this node, as it is the next node to be allocated
1348 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1349 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1351 /* Now we know the value for "pos" and can enter it in the array with
1352 all known local variables. Attention: this might be a pointer to
1353 a node, that later will be allocated!!! See new_rd_Phi_in.
1354 If this is called in mature, after some set_value in the same block,
1355 the proper value must not be overwritten:
1357 get_value (makes Phi0, puts it into graph_arr)
1358 set_value (overwrites Phi0 in graph_arr)
1359 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1362 if (!block->attr.block.graph_arr[pos]) {
1363 block->attr.block.graph_arr[pos] = res;
1365 /* printf(" value already computed by %s\n",
1366 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1372 /* This function returns the last definition of a variable. In case
1373 this variable was last defined in a previous block, Phi nodes are
1374 inserted. If the part of the firm graph containing the definition
1375 is not yet constructed, a dummy Phi node is returned. */
1377 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1380 /* There are 4 cases to treat.
1382 1. The block is not mature and we visit it the first time. We can not
1383 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1384 predecessors is returned. This node is added to the linked list (field
1385 "link") of the containing block to be completed when this block is
1386 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1389 2. The value is already known in this block, graph_arr[pos] is set and we
1390 visit the block the first time. We can return the value without
1391 creating any new nodes.
1393 3. The block is mature and we visit it the first time. A Phi node needs
1394 to be created (phi_merge). If the Phi is not needed, as all its
1395 operands are the same value reaching the block through different
1396 paths, it's optimized away and the value itself is returned.
1398 4. The block is mature, and we visit it the second time. Now two
1399 subcases are possible:
1400 * The value was computed completely the last time we were here. This
1401 is the case if there is no loop. We can return the proper value.
1402 * The recursion that visited this node and set the flag did not
1403 return yet. We are computing a value in a loop and need to
1404 break the recursion without knowing the result yet.
1405 @@@ strange case. The straightforward approach would create a Phi before
1406 starting the computation of its predecessors. In this case we will
1407 find a Phi here in any case. The problem is that this implementation
1408 only creates a Phi after computing the predecessors, so that it is
1409 hard to compute self references of this Phi. @@@
1410 There is no simple check for the second subcase. Therefore we check
1411 for a second visit and treat all such cases as the second subcase.
1412 Anyway, the basic situation is the same: we reached a block
1413 on two paths without finding a definition of the value: no Phi
1414 node is needed on either path.
1415 We return this information "Two paths, no Phi needed" by a very tricky
1416 implementation that relies on the fact that an obstack is a stack and
1417 will return a node with the same address on different allocations.
1418 Look also at phi_merge and new_rd_phi_in to understand this.
1419 @@@ Unfortunately this does not work, see testprogram
1420 three_cfpred_example.
1424 /* case 4 -- already visited. */
1425 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1427 /* visited the first time */
1428 set_irn_visited(block, get_irg_visited(current_ir_graph));
1430 /* Get the local valid value */
1431 res = block->attr.block.graph_arr[pos];
1433 /* case 2 -- If the value is actually computed, return it. */
1434 if (res) return res;
1436 if (block->attr.block.matured) { /* case 3 */
1438 /* The Phi has the same number of ins as the corresponding block. */
1439 int ins = get_irn_arity(block);
1441 NEW_ARR_A (ir_node *, nin, ins);
1443 /* Phi merge collects the predecessors and then creates a node. */
1444 res = phi_merge (block, pos, mode, nin, ins);
1446 } else { /* case 1 */
1447 /* The block is not mature, we don't know how many in's are needed. A Phi
1448 with zero predecessors is created. Such a Phi node is called Phi0
1449 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1450 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1452 The Phi0 has to remember the pos of its internal value. If the real
1453 Phi is computed, pos is used to update the array with the local
1456 res = new_rd_Phi0 (current_ir_graph, block, mode);
1457 res->attr.phi0_pos = pos;
1458 res->link = block->link;
1462 /* If we get here, the frontend missed a use-before-definition error */
1465 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1466 assert (mode->code >= irm_F && mode->code <= irm_P);
1467 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1468 tarval_mode_null[mode->code]);
1471 /* The local valid value is available now. */
1472 block->attr.block.graph_arr[pos] = res;
1480 it starts the recursion. This causes an Id at the entry of
1481 every block that has no definition of the value! **/
1483 #if USE_EXPLICIT_PHI_IN_STACK
1485 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1486 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1489 static INLINE ir_node *
1490 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1491 ir_node **in, int ins, ir_node *phi0)
1494 ir_node *res, *known;
1496 /* Allocate a new node on the obstack. The allocation copies the in
1498 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1499 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1501 /* This loop checks whether the Phi has more than one predecessor.
1502 If so, it is a real Phi node and we break the loop. Else the
1503 Phi node merges the same definition on several paths and therefore
1504 is not needed. Don't consider Bad nodes! */
1506 for (i=0; i < ins; ++i)
1510 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1512 /* Optimize self-referencing Phis: We cannot detect them properly yet, as
1513 they still refer to the Phi0 they will replace. So replace them right now. */
1514 if (phi0 && in[i] == phi0) in[i] = res;
1516 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1524 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1527 obstack_free (current_ir_graph->obst, res);
1528 if (is_Phi(known)) {
1529 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1530 order, an enclosing Phi node may become superfluous. */
1531 res = optimize_in_place_2(known);
1532 if (res != known) { exchange(known, res); }
1537 /* An undefined value, e.g., in unreachable code. */
1541 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1542 IRN_VRFY_IRG(res, irg);
1543 /* Memory Phis in endless loops must be kept alive.
1544 As we can't distinguish these easily we keep all of them alive. */
1545 if ((res->op == op_Phi) && (mode == mode_M))
1546 add_End_keepalive(irg->end, res);
1553 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1555 #if PRECISE_EXC_CONTEXT
1557 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1559 /* Construct a new frag_array for node n.
1560 Copy the content from the current graph_arr of the corresponding block:
1561 this is the current state.
1562 Set ProjM(n) as current memory state.
1563 Furthermore, the last entry of the current block's graph_arr points to n. This
1564 constructs a chain block->last_frag_op-> ... ->first_frag_op of all frag ops in the block.
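/* Illustration (not part of the original file): after a Load l and then a
   Call c have been constructed in the current block B, the chain reads

       B.graph_arr[n_loc-1] == c
       c.frag_arr[n_loc-1]  == l     (copied from graph_arr when c was built)
       l.frag_arr[n_loc-1]  == NULL  (l was the first fragile op in B)

   so set_frag_value below can reach every fragile op of the block. */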
1566 static INLINE ir_node ** new_frag_arr (ir_node *n)
1571 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1572 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1573 sizeof(ir_node *)*current_ir_graph->n_loc);
1575 /* turn off optimization before allocating Proj nodes, as res isn't
1577 opt = get_opt_optimize(); set_optimize(0);
1578 /* Here we rely on the fact that all frag ops have Memory as first result! */
1579 if (get_irn_op(n) == op_Call)
1580 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1582 assert((pn_Quot_M == pn_DivMod_M) &&
1583 (pn_Quot_M == pn_Div_M) &&
1584 (pn_Quot_M == pn_Mod_M) &&
1585 (pn_Quot_M == pn_Load_M) &&
1586 (pn_Quot_M == pn_Store_M) &&
1587 (pn_Quot_M == pn_Alloc_M) );
1588 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1592 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1597 * returns the frag_arr from a node
1599 static INLINE ir_node **
1600 get_frag_arr (ir_node *n) {
1601 switch (get_irn_opcode(n)) {
1603 return n->attr.call.exc.frag_arr;
1605 return n->attr.a.exc.frag_arr;
1607 return n->attr.load.exc.frag_arr;
1609 return n->attr.store.exc.frag_arr;
1611 return n->attr.except.frag_arr;
1616 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1618 if (!frag_arr[pos]) frag_arr[pos] = val;
1619 if (frag_arr[current_ir_graph->n_loc - 1]) {
1620 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1621 assert(arr != frag_arr && "Endless recursion detected");
1622 set_frag_value(arr, pos, val);
1627 for (i = 0; i < 1000; ++i) {
1628 if (!frag_arr[pos]) {
1629 frag_arr[pos] = val;
1631 if (frag_arr[current_ir_graph->n_loc - 1]) {
1632 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1638 assert(0 && "potential endless recursion");
1643 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1647 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1649 frag_arr = get_frag_arr(cfOp);
1650 res = frag_arr[pos];
1652 if (block->attr.block.graph_arr[pos]) {
1653 /* There was a set_value after the cfOp and no get_value before that
1654 set_value. We must build a Phi node now. */
1655 if (block->attr.block.matured) {
1656 int ins = get_irn_arity(block);
1658 NEW_ARR_A (ir_node *, nin, ins);
1659 res = phi_merge(block, pos, mode, nin, ins);
1661 res = new_rd_Phi0 (current_ir_graph, block, mode);
1662 res->attr.phi0_pos = pos;
1663 res->link = block->link;
1667 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1668 but this should be better: (remove comment if this works) */
1669 /* It's a Phi, we can write this into all graph_arrs with NULL */
1670 set_frag_value(block->attr.block.graph_arr, pos, res);
1672 res = get_r_value_internal(block, pos, mode);
1673 set_frag_value(block->attr.block.graph_arr, pos, res);
1681 computes the predecessors for the real phi node, and then
1682 allocates and returns this node. The routine called to allocate the
1683 node might optimize it away and return a real value.
1684 This function must be called with an in-array of proper size. **/
1686 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1688 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1691 /* If this block has no value at pos create a Phi0 and remember it
1692 in graph_arr to break recursions.
1693 Else we may not set graph_arr, as a later value is already remembered there. */
1695 if (!block->attr.block.graph_arr[pos]) {
1696 if (block == get_irg_start_block(current_ir_graph)) {
1697 /* Collapsing to Bad tarvals is not a good idea.
1698 So we call a user-supplied routine here that deals with this case as
1699 appropriate for the given language. Unfortunately the only help we can give
1700 here is the position.
1702 Even if all variables are defined before use, it can happen that
1703 we get to the start block, if a Cond has been replaced by a tuple
1704 (Bad, Jmp). In this case we call the function needlessly, possibly
1705 reporting an error that does not exist.
1706 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1709 if (default_initialize_local_variable)
1710 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1712 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1713 /* We don't need to care about exception ops in the start block.
1714 There are none by definition. */
1715 return block->attr.block.graph_arr[pos];
1717 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1718 block->attr.block.graph_arr[pos] = phi0;
1719 #if PRECISE_EXC_CONTEXT
1720 if (get_opt_precise_exc_context()) {
1721 /* Set graph_arr for fragile ops. Also here we should break recursion.
1722 We could choose a cyclic path through a cfop. But the recursion would
1723 break at some point. */
1724 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1730 /* This loop goes to all predecessor blocks of the block the Phi node
1731 is in and there finds the operands of the Phi node by calling
1732 get_r_value_internal. */
1733 for (i = 1; i <= ins; ++i) {
1734 prevCfOp = skip_Proj(block->in[i]);
1736 if (is_Bad(prevCfOp)) {
1737 /* In case a Cond has been optimized we would get right to the start block
1738 with an invalid definition. */
1739 nin[i-1] = new_Bad();
1742 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1744 if (!is_Bad(prevBlock)) {
1745 #if PRECISE_EXC_CONTEXT
1746 if (get_opt_precise_exc_context() &&
1747 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1748 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1749 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1752 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1754 nin[i-1] = new_Bad();
1758 /* We want to pass the Phi0 node to the constructor: this finds additional
1759 optimization possibilities.
1760 The Phi0 node is either allocated in this function, or it comes from
1761 an earlier call to get_r_value_internal. In this case we may not yet
1762 exchange phi0, as this is done in mature_immBlock. */
1764 phi0_all = block->attr.block.graph_arr[pos];
1765 if (!((get_irn_op(phi0_all) == op_Phi) &&
1766 (get_irn_arity(phi0_all) == 0) &&
1767 (get_nodes_block(phi0_all) == block)))
1773 /* After collecting all predecessors into the array nin a new Phi node
1774 with these predecessors is created. This constructor contains an
1775 optimization: If all predecessors of the Phi node are identical it
1776 returns the only operand instead of a new Phi node. */
1777 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1779 /* In case we allocated a Phi0 node at the beginning of this procedure,
1780 we need to exchange this Phi0 with the real Phi. */
1782 exchange(phi0, res);
1783 block->attr.block.graph_arr[pos] = res;
1784 /* Don't call set_frag_value, as it does not overwrite. This doesn't matter;
1785 it is only an optimization. */
1791 /* This function returns the last definition of a variable. In case
1792 this variable was last defined in a previous block, Phi nodes are
1793 inserted. If the part of the firm graph containing the definition
1794 is not yet constructed, a dummy Phi node is returned. */
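/* Concrete reading of the cases enumerated below (illustration only): for
   "x = 1; if (c) x = 2; y = x;" a read of x in the block that also contains
   its assignment hits case 2, the read for y in the mature join block hits
   case 3 (phi_merge builds a Phi over the definitions 1 and 2), and case 1
   applies whenever a block is visited before it is mature, in which case a
   Phi0 placeholder is recorded and later completed by mature_immBlock. */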
1796 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1799 /* There are 4 cases to treat.
1801 1. The block is not mature and we visit it the first time. We can not
1802 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1803 predecessors is returned. This node is added to the linked list (field
1804 "link") of the containing block to be completed when this block is
1805 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1808 2. The value is already known in this block, graph_arr[pos] is set and we
1809 visit the block the first time. We can return the value without
1810 creating any new nodes.
1812 3. The block is mature and we visit it the first time. A Phi node needs
1813 to be created (phi_merge). If the Phi is not needed, as all its
1814 operands are the same value reaching the block through different
1815 paths, it's optimized away and the value itself is returned.
1817 4. The block is mature, and we visit it the second time. Now two
1818 subcases are possible:
1819 * The value was computed completely the last time we were here. This
1820 is the case if there is no loop. We can return the proper value.
1821 * The recursion that visited this node and set the flag did not
1822 return yet. We are computing a value in a loop and need to
1823 break the recursion. This case only happens if we visited
1824 the same block with phi_merge before, which inserted a Phi0.
1825 So we return the Phi0.
1828 /* case 4 -- already visited. */
1829 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1830 /* As phi_merge allocates a Phi0 this value is always defined. Here
1831 is the critical difference of the two algorithms. */
1832 assert(block->attr.block.graph_arr[pos]);
1833 return block->attr.block.graph_arr[pos];
1836 /* visited the first time */
1837 set_irn_visited(block, get_irg_visited(current_ir_graph));
1839 /* Get the local valid value */
1840 res = block->attr.block.graph_arr[pos];
1842 /* case 2 -- If the value is actually computed, return it. */
1843 if (res) { return res; }
1845 if (block->attr.block.matured) { /* case 3 */
1847 /* The Phi has the same number of ins as the corresponding block. */
1848 int ins = get_irn_arity(block);
1850 NEW_ARR_A (ir_node *, nin, ins);
1852 /* Phi merge collects the predecessors and then creates a node. */
1853 res = phi_merge (block, pos, mode, nin, ins);
1855 } else { /* case 1 */
1856 /* The block is not mature, we don't know how many in's are needed. A Phi
1857 with zero predecessors is created. Such a Phi node is called Phi0
1858 node. The Phi0 is then added to the list of Phi0 nodes in this block
1859 to be matured by mature_immBlock later.
1860 The Phi0 has to remember the pos of its internal value. If the real
1861 Phi is computed, pos is used to update the array with the local
1863 res = new_rd_Phi0 (current_ir_graph, block, mode);
1864 res->attr.phi0_pos = pos;
1865 res->link = block->link;
1869 /* If we get here, the frontend missed a use-before-definition error */
1872 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1873 assert (mode->code >= irm_F && mode->code <= irm_P);
1874 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1875 get_mode_null(mode));
1878 /* The local valid value is available now. */
1879 block->attr.block.graph_arr[pos] = res;
1884 #endif /* USE_FAST_PHI_CONSTRUCTION */
1886 /* ************************************************************************** */
1888 /** Finalize a Block node, when all control flows are known. */
1889 /** Acceptable parameters are only Block nodes. */
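/* Usage sketch (illustration only): the typical immature block workflow on the
   frontend side.  new_immBlock and add_immBlock_pred are assumed here as the
   public counterparts from the construction headers; jmp1 and jmp2 are
   placeholders:

       ir_node *b = new_immBlock();
       ...
       add_immBlock_pred(b, jmp1);
       add_immBlock_pred(b, jmp2);
       mature_immBlock(b);
*/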
1891 mature_immBlock (ir_node *block)
1898 assert (get_irn_opcode(block) == iro_Block);
1899 /* @@@ should be commented in
1900 assert (!get_Block_matured(block) && "Block already matured"); */
1902 if (!get_Block_matured(block)) {
1903 ins = ARR_LEN (block->in)-1;
1904 /* Fix block parameters */
1905 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1907 /* An array for building the Phi nodes. */
1908 NEW_ARR_A (ir_node *, nin, ins);
1910 /* Traverse a chain of Phi nodes attached to this block and mature
1912 for (n = block->link; n; n=next) {
1913 inc_irg_visited(current_ir_graph);
1915 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1918 block->attr.block.matured = 1;
1920 /* Now, as the block is a finished Firm node, we can optimize it.
1921 Since other nodes have been allocated since the block was created
1922 we cannot free the node on the obstack. Therefore we have to call optimize_in_place().
1924 Unfortunately the optimization does not change a lot, as all allocated
1925 nodes refer to the unoptimized node.
1926 We can call optimize_in_place_2(), as global CSE has no effect on blocks. */
1927 block = optimize_in_place_2(block);
1928 IRN_VRFY_IRG(block, current_ir_graph);
1933 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1935 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1940 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1942 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1947 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1949 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1955 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1957 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1962 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1964 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
1969 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1972 assert(arg->op == op_Cond);
1973 arg->attr.c.kind = fragmentary;
1974 arg->attr.c.default_proj = max_proj;
1975 res = new_Proj (arg, mode_X, max_proj);
1980 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1982 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1987 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1989 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1993 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1995 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2000 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2002 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2007 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2009 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2015 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2017 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2022 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2024 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2029 * allocate the frag array
2031 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2032 if (get_opt_precise_exc_context()) {
2033 if ((current_ir_graph->phase_state == phase_building) &&
2034 (get_irn_op(res) == op) && /* Could be optimized away. */
2035 !*frag_store) /* Could be a cse where the arr is already set. */ {
2036 *frag_store = new_frag_arr(res);
2043 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2046 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2048 res->attr.except.pin_state = op_pin_state_pinned;
2049 #if PRECISE_EXC_CONTEXT
2050 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2057 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2060 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2062 res->attr.except.pin_state = op_pin_state_pinned;
2063 #if PRECISE_EXC_CONTEXT
2064 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2071 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2074 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2076 res->attr.except.pin_state = op_pin_state_pinned;
2077 #if PRECISE_EXC_CONTEXT
2078 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2085 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2088 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2090 res->attr.except.pin_state = op_pin_state_pinned;
2091 #if PRECISE_EXC_CONTEXT
2092 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
ir_node *new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
}
ir_node *new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or(db, current_ir_graph, current_ir_graph->current_block,
                   op1, op2, mode);
}
ir_node *new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
}
ir_node *new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Not(db, current_ir_graph, current_ir_graph->current_block,
                    op, mode);
}
ir_node *new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl(db, current_ir_graph, current_ir_graph->current_block,
                    op, k, mode);
}
ir_node *new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr(db, current_ir_graph, current_ir_graph->current_block,
                    op, k, mode);
}
ir_node *new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs(db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}
ir_node *new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot(db, current_ir_graph, current_ir_graph->current_block,
                    op, k, mode);
}
ir_node *new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Abs(db, current_ir_graph, current_ir_graph->current_block,
                    op, mode);
}
ir_node *new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2) {
  return new_rd_Cmp(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2);
}
ir_node *new_d_Jmp (dbg_info* db) {
  return new_rd_Jmp(db, current_ir_graph, current_ir_graph->current_block);
}
ir_node *new_d_Cond (dbg_info* db, ir_node *c) {
  return new_rd_Cond(db, current_ir_graph, current_ir_graph->current_block, c);
}
ir_node *new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
                     type *tp) {
  ir_node *res;
  res = new_rd_Call(db, current_ir_graph, current_ir_graph->current_block,
                    store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
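
/* Illustrative sketch (not part of the original file): emit a call to a
   function of one integer argument and one integer result.  `callee_addr`
   (a node holding the callee's address) and `method_tp` (the callee's
   method type) are assumed to exist; the pn_Call_* numbers select the
   memory and result Projs of the Call tuple. */
static ir_node *build_call_sketch(ir_node *callee_addr, type *method_tp, ir_node *arg)
{
  ir_node *in[1];
  ir_node *call, *ress;

  in[0] = arg;
  call  = new_Call(get_store(), callee_addr, 1, in, method_tp);

  set_store(new_Proj(call, mode_M, pn_Call_M_regular));   /* thread the memory state */
  ress  = new_Proj(call, mode_T, pn_Call_T_result);       /* tuple of all results    */
  return new_Proj(ress, mode_Is, 0);                      /* the first (only) result */
}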
ir_node *new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in) {
  return new_rd_Return(db, current_ir_graph, current_ir_graph->current_block,
                       store, arity, in);
}
ir_node *new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj) {
  return new_rd_Raise(db, current_ir_graph, current_ir_graph->current_block,
                      store, obj);
}
ir_node *new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode) {
  ir_node *res;
  res = new_rd_Load(db, current_ir_graph, current_ir_graph->current_block,
                    store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
ir_node *new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) {
  ir_node *res;
  res = new_rd_Store(db, current_ir_graph, current_ir_graph->current_block,
                     store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
ir_node *new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
                      where_alloc where) {
  ir_node *res;
  res = new_rd_Alloc(db, current_ir_graph, current_ir_graph->current_block,
                     store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
ir_node *new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type) {
  return new_rd_Free(db, current_ir_graph, current_ir_graph->current_block,
                     store, ptr, size, free_type);
}
ir_node *new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  return new_rd_Sel(db, current_ir_graph, current_ir_graph->current_block,
                    store, objptr, 0, NULL, ent);
}
ir_node *new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel) {
  return new_rd_Sel(db, current_ir_graph, current_ir_graph->current_block,
                    store, objptr, n_index, index, sel);
}
ir_node *new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent) {
  return new_rd_InstOf(db, current_ir_graph, current_ir_graph->current_block,
                       store, objptr, ent);
}
ir_node *new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp) {
  return new_rd_SymConst_type(db, current_ir_graph, current_ir_graph->start_block,
                              value, kind, tp);
}
ir_node *new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind) {
  return new_rd_SymConst(db, current_ir_graph, current_ir_graph->start_block,
                         value, kind);
}
ir_node *new_d_Sync (dbg_info* db, int arity, ir_node** in) {
  return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block,
                     arity, in);
}
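
/* Illustrative sketch (not part of the original file): load a member
   selected by entity `fld_ent` (assumed to be of mode_Is) from the object
   pointed to by `objptr`, using the simpleSel and Load constructors above. */
static ir_node *load_field_sketch(ir_node *objptr, entity *fld_ent)
{
  ir_node *addr, *load;

  addr = new_simpleSel(get_store(), objptr, fld_ent);   /* address of the member   */
  load = new_Load(get_store(), addr, mode_Is);
  set_store(new_Proj(load, mode_M, pn_Load_M));         /* thread the memory state */
  return new_Proj(load, mode_Is, pn_Load_res);          /* the loaded value        */
}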
ir_node *(new_d_Bad)(void) {
  return __new_d_Bad();
}
ir_node *new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm(db, current_ir_graph, current_ir_graph->current_block,
                        val, bound, cmp);
}
ir_node *new_d_Unknown (ir_mode *m) {
  return new_rd_Unknown(current_ir_graph, m);
}
ir_node *new_d_CallBegin (dbg_info *db, ir_node *call) {
  return new_rd_CallBegin(db, current_ir_graph, current_ir_graph->current_block, call);
}
ir_node *new_d_EndReg (dbg_info *db) {
  return new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
}
ir_node *new_d_EndExcept (dbg_info *db) {
  return new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
}
ir_node *new_d_Break (dbg_info *db) {
  return new_rd_Break(db, current_ir_graph, current_ir_graph->current_block);
}
ir_node *new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_rd_Filter(db, current_ir_graph, current_ir_graph->current_block,
                       arg, mode, proj);
}
ir_node *(new_d_NoMem)(void) {
  return __new_d_NoMem();
}
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Uses also constructors of ?? interface, except new_Block.)           */
/* ********************************************************************* */

/** Block construction **/
/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
  ir_node *res;

  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
                                        current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *) * current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}

ir_node *new_immBlock (void) {
  return new_d_immBlock(NULL);
}
/* add an edge to a jmp/control flow node */
void add_immBlock_pred (ir_node *block, ir_node *jmp)
{
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}
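
/* Illustrative sketch (not part of the original file): build an if/else
   diamond with the immature-block interface.  `cond_value` is a mode_b
   node that was already constructed in the current (mature) block;
   mature_immBlock() is assumed to be the matching "no more predecessors
   will be added" call of this interface. */
static void build_diamond_sketch(ir_node *cond_value)
{
  ir_node *cond, *projT, *projF;
  ir_node *then_bl, *then_jmp, *else_bl, *else_jmp, *join_bl;

  cond  = new_Cond(cond_value);
  projT = new_Proj(cond, mode_X, 1);   /* control flow if cond_value is true  */
  projF = new_Proj(cond, mode_X, 0);   /* control flow if cond_value is false */

  then_bl = new_immBlock();            /* becomes the current block */
  add_immBlock_pred(then_bl, projT);
  mature_immBlock(then_bl);            /* all predecessors are known now */
  then_jmp = new_Jmp();

  else_bl = new_immBlock();
  add_immBlock_pred(else_bl, projF);
  mature_immBlock(else_bl);
  else_jmp = new_Jmp();

  join_bl = new_immBlock();            /* Phi nodes for locals appear here on demand */
  add_immBlock_pred(join_bl, then_jmp);
  add_immBlock_pred(join_bl, else_jmp);
  mature_immBlock(join_bl);
}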
/* changing the current block */
void set_cur_block (ir_node *target)
{
  current_ir_graph->current_block = target;
}
/* ************************ */
/* parameter administration */

/* Get a value from the array of all values in the current block, by its index. */
ir_node *get_d_value (dbg_info* db, int pos, ir_mode *mode)
{
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal(current_ir_graph->current_block, pos + 1, mode);
}

/* Get a value from the array of all values in the current block, by its index. */
ir_node *get_value (int pos, ir_mode *mode)
{
  return get_d_value(NULL, pos, mode);
}

/* Set a value at position pos in the array of all values in the current block. */
void set_value (int pos, ir_node *value)
{
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  assert(pos + 1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}
/* Get the current store (memory state). */
ir_node *get_store (void)
{
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal(current_ir_graph->current_block, 0, mode_M);
}

/* Set the current store (memory state). */
void set_store (ir_node *store)
{
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
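
/* Illustrative sketch (not part of the original file): translate
   "x = x + 1; *p = x;" with the value/store administration above.
   Local slot 0 is assumed to hold x (mode_Is), slot 1 the pointer p
   (mode_P); pn_Store_M selects the memory result of the Store. */
static void build_stmt_sketch(void)
{
  ir_node *x, *one, *sum, *p, *st;

  x   = get_value(0, mode_Is);                       /* current SSA value of x   */
  one = new_Const(mode_Is, new_tarval_from_long(1, mode_Is));
  sum = new_Add(x, one, mode_Is);
  set_value(0, sum);                                 /* x now names the Add node */

  p   = get_value(1, mode_P);
  st  = new_Store(get_store(), p, sum);              /* *p = x                   */
  set_store(new_Proj(st, mode_M, pn_Store_M));       /* thread the memory state  */
}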
/* Add a keep-alive edge to the End node so that ka is never considered dead. */
void keep_alive (ir_node *ka)
{
  add_End_keepalive(current_ir_graph->end, ka);
}

/** Useful access routines **/
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block() {
  return get_irg_current_block(current_ir_graph);
}

/* Returns the frame type of the current graph. */
type *get_cur_frame_type() {
  return get_irg_frame_type(current_ir_graph);
}
/* ********************************************************************* */

/* call once for each run of the library */
void init_cons (default_initialize_local_variable_func_t *func)
{
  default_initialize_local_variable = func;
}

/* call once for each graph when construction is finished */
void finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}
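
/* Illustrative sketch (not part of the original file): the per-graph
   construction lifecycle.  `my_ent` is a hypothetical method entity with
   no parameters and no results; 2 is the number of local value slots
   requested for the graph. */
static void build_graph_sketch(entity *my_ent)
{
  ir_graph *irg = new_ir_graph(my_ent, 2);   /* also makes irg the current graph */
  ir_node  *ret;

  /* ... build the body with new_immBlock, get_value, set_value, ... */

  ret = new_Return(get_store(), 0, NULL);
  mature_immBlock(get_irg_current_block(irg));

  add_immBlock_pred(get_irg_end_block(irg), ret);
  mature_immBlock(get_irg_end_block(irg));

  finalize_cons(irg);
}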
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start  (void) {
  return new_d_Start(NULL);
}
ir_node *new_End    (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp    (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_Cond   (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise  (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Const  (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_Const_type(tarval *con, type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel    (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                     entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf(NULL, store, objptr, ent);
}
ir_node *new_Call   (ir_node *store, ir_node *callee, int arity, ir_node **in,
                     type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus  (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot   (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div    (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod    (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs    (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or     (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not    (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl    (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr    (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs   (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
#define new_Rotate new_Rot
ir_node *new_Rot    (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp    (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv   (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast   (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi    (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load   (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store  (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc  (ir_node *store, ir_node *size, type *alloc_type,
                     where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free   (ir_node *store, ir_node *ptr, ir_node *size,
                     type *free_type) {
  return new_d_Free(NULL, store, ptr, size, free_type);
}
ir_node *new_Sync   (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj   (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple  (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id     (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad    (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm(NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break  (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem  (void) {
  return new_d_NoMem();
}