3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language-dependent initialization variable
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
/* NOTE(review): this view of the file is sampled; interior lines of this
   function (local declarations, return, braces) are not visible here. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A raw Block is created mature and with its visited flag cleared. */
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
/* The block keeps a back-pointer to its graph and a fresh backedge
   array sized to its arity; interprocedural fields start out unset. */
72 res->attr.block.irg = irg;
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
/* NOTE(review): sampled view; declarations of res/i and the loop body that
   sets has_unknown are not visible here. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its control-flow arity must match the
   number of Phi operands supplied. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; a Phi with Unknown inputs must not
   be handed to the optimizer (see the use of has_unknown below). */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval `con` with explicit type `tp`.
   NOTE(review): the `block` parameter is visibly ignored — the node is
   always placed in irg->start_block (constants live in the start block). */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
/* Optimization must not have changed the node's declared type. */
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const node with the unknown type attached. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 return new_rd_Const_type (db, irg, block, mode, con, tp);
162 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
166 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
167 res = optimize_node(res);
168 IRN_VRFY_IRG(res, irg);
173 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
178 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
179 res->attr.proj = proj;
182 assert(get_Proj_pred(res));
183 assert(get_nodes_block(get_Proj_pred(res)));
185 res = optimize_node(res);
187 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond (the "otherwise" branch).
   Side effect: mutates the Cond node `arg` — marks it fragmentary and
   records max_proj as its default projection number. */
193 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
197 assert(arg->op == op_Cond);
198 arg->attr.c.kind = fragmentary;
199 arg->attr.c.default_proj = max_proj;
200 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
205 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
210 res = optimize_node(res);
211 IRN_VRFY_IRG(res, irg);
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
220 assert(is_atomic_type(to_tp));
222 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
223 res->attr.cast.totype = to_tp;
224 res = optimize_node(res);
225 IRN_VRFY_IRG(res, irg);
230 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
234 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
235 res = optimize_node (res);
236 IRN_VRFY_IRG(res, irg);
241 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
242 ir_node *op1, ir_node *op2, ir_mode *mode)
249 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
250 res = optimize_node(res);
251 IRN_VRFY_IRG(res, irg);
256 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *op1, ir_node *op2, ir_mode *mode)
264 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op, ir_mode *mode)
276 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
277 res = optimize_node(res);
278 IRN_VRFY_IRG(res, irg);
283 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
284 ir_node *op1, ir_node *op2, ir_mode *mode)
291 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
292 res = optimize_node(res);
293 IRN_VRFY_IRG(res, irg);
298 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
299 ir_node *memop, ir_node *op1, ir_node *op2)
307 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
308 res = optimize_node(res);
309 IRN_VRFY_IRG(res, irg);
314 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
315 ir_node *memop, ir_node *op1, ir_node *op2)
323 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
324 res = optimize_node(res);
325 IRN_VRFY_IRG(res, irg);
330 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
331 ir_node *memop, ir_node *op1, ir_node *op2)
339 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
340 res = optimize_node(res);
341 IRN_VRFY_IRG(res, irg);
346 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *memop, ir_node *op1, ir_node *op2)
355 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
356 res = optimize_node(res);
357 IRN_VRFY_IRG(res, irg);
362 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
363 ir_node *op1, ir_node *op2, ir_mode *mode)
370 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
371 res = optimize_node(res);
372 IRN_VRFY_IRG(res, irg);
377 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
378 ir_node *op1, ir_node *op2, ir_mode *mode)
385 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
386 res = optimize_node(res);
387 IRN_VRFY_IRG(res, irg);
392 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
393 ir_node *op1, ir_node *op2, ir_mode *mode)
400 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
401 res = optimize_node (res);
402 IRN_VRFY_IRG(res, irg);
407 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
408 ir_node *op, ir_mode *mode)
412 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
413 res = optimize_node(res);
414 IRN_VRFY_IRG(res, irg);
419 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
420 ir_node *op, ir_node *k, ir_mode *mode)
427 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
434 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
442 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
450 ir_node *op, ir_node *k, ir_mode *mode)
457 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
458 res = optimize_node(res);
459 IRN_VRFY_IRG(res, irg);
464 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *op, ir_node *k, ir_mode *mode)
472 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
480 ir_node *op, ir_mode *mode)
484 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
485 res = optimize_node (res);
486 IRN_VRFY_IRG(res, irg);
491 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
492 ir_node *op1, ir_node *op2)
499 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
500 res = optimize_node(res);
501 IRN_VRFY_IRG(res, irg);
506 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
510 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
511 res = optimize_node (res);
512 IRN_VRFY_IRG (res, irg);
517 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
521 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
522 res->attr.c.kind = dense;
523 res->attr.c.default_proj = 0;
524 res = optimize_node (res);
525 IRN_VRFY_IRG(res, irg);
/* Constructs a Call node. The real in-array layout is
   [0] = store (memory), [1] = callee (not visible in this sampled view,
   but implied by the memcpy into &r_in[2]), [2..] = the arguments. */
530 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
531 ir_node *callee, int arity, ir_node **in, type *tp)
538 NEW_ARR_A(ir_node *, r_in, r_arity);
541 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
543 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* A Call's type must be a method type (or the unknown type placeholder). */
545 assert((get_unknown_type() == tp) || is_method_type(tp));
546 set_Call_type(res, tp);
/* Calls may throw, so they start out pinned; callee_arr (for analyses
   that resolve indirect calls) starts empty. */
547 res->attr.call.exc.pin_state = op_pin_state_pinned;
548 res->attr.call.callee_arr = NULL;
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
555 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
556 ir_node *store, int arity, ir_node **in)
563 NEW_ARR_A (ir_node *, r_in, r_arity);
565 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
566 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
573 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
580 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
581 res = optimize_node(res);
582 IRN_VRFY_IRG(res, irg);
587 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
588 ir_node *store, ir_node *adr, ir_mode *mode)
595 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
596 res->attr.load.exc.pin_state = op_pin_state_pinned;
597 res->attr.load.load_mode = mode;
598 res->attr.load.volatility = volatility_non_volatile;
599 res = optimize_node(res);
600 IRN_VRFY_IRG(res, irg);
605 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
606 ir_node *store, ir_node *adr, ir_node *val)
614 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
615 res->attr.store.exc.pin_state = op_pin_state_pinned;
616 res->attr.store.volatility = volatility_non_volatile;
617 res = optimize_node(res);
618 IRN_VRFY_IRG(res, irg);
623 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
624 ir_node *size, type *alloc_type, where_alloc where)
631 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
632 res->attr.a.exc.pin_state = op_pin_state_pinned;
633 res->attr.a.where = where;
634 res->attr.a.type = alloc_type;
635 res = optimize_node(res);
636 IRN_VRFY_IRG(res, irg);
641 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
642 ir_node *ptr, ir_node *size, type *free_type)
650 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
651 res->attr.f = free_type;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
658 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
659 int arity, ir_node **in, entity *ent)
665 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
668 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
671 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
672 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
673 res->attr.s.ent = ent;
674 res = optimize_node(res);
675 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (dynamic type test) node.
   NOTE(review): the node is created with op_Sel rather than a dedicated
   InstOf opcode, while its attribute is set through attr.io — this looks
   inconsistent; confirm against the full file whether op_Sel is intended. */
680 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
681 ir_node *objptr, type *ent)
688 NEW_ARR_A(ir_node *, r_in, r_arity);
692 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
693 res->attr.io.ent = ent;
695 /* res = optimize(res); */
696 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst node (symbolic constant: address, type tag, or
   size) with an explicit type. Address kinds get a pointer-like mode;
   the exact mode selection lines are not visible in this sampled view. */
701 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
702 symconst_kind symkind, type *tp) {
706 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
711 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
/* The kind discriminates the union stored in attr.i.sym. */
713 res->attr.i.num = symkind;
714 res->attr.i.sym = value;
717 res = optimize_node(res);
718 IRN_VRFY_IRG(res, irg);
723 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
724 symconst_kind symkind)
726 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
730 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
731 symconst_symbol sym = {(type *)symbol};
732 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
735 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
736 symconst_symbol sym = {(type *)symbol};
737 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
740 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
741 symconst_symbol sym = {symbol};
742 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
745 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
746 symconst_symbol sym = {symbol};
747 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
751 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
755 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
756 res = optimize_node(res);
757 IRN_VRFY_IRG(res, irg);
762 new_rd_Bad (ir_graph *irg)
768 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
770 ir_node *in[2], *res;
774 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
775 res->attr.confirm_cmp = cmp;
776 res = optimize_node (res);
777 IRN_VRFY_IRG(res, irg);
782 new_rd_Unknown (ir_graph *irg, ir_mode *m)
784 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
788 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
793 in[0] = get_Call_ptr(call);
794 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
795 /* res->attr.callbegin.irg = irg; */
796 res->attr.callbegin.call = call;
797 res = optimize_node(res);
798 IRN_VRFY_IRG(res, irg);
803 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
807 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
809 IRN_VRFY_IRG(res, irg);
814 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
818 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
819 irg->end_except = res;
820 IRN_VRFY_IRG (res, irg);
825 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
829 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
830 res = optimize_node(res);
831 IRN_VRFY_IRG(res, irg);
836 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
841 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
842 res->attr.filter.proj = proj;
843 res->attr.filter.in_cg = NULL;
844 res->attr.filter.backedge = NULL;
847 assert(get_Proj_pred(res));
848 assert(get_nodes_block(get_Proj_pred(res)));
850 res = optimize_node(res);
851 IRN_VRFY_IRG(res, irg);
857 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
858 ir_node *callee, int arity, ir_node **in, type *tp)
865 NEW_ARR_A(ir_node *, r_in, r_arity);
867 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
869 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
871 assert(is_method_type(tp));
872 set_FuncCall_type(res, tp);
873 res->attr.call.callee_arr = NULL;
874 res = optimize_node(res);
875 IRN_VRFY_IRG(res, irg);
880 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
881 return new_rd_Block(NULL, irg, arity, in);
883 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
884 return new_rd_Start(NULL, irg, block);
886 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
887 return new_rd_End(NULL, irg, block);
889 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
890 return new_rd_Jmp(NULL, irg, block);
892 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
893 return new_rd_Cond(NULL, irg, block, c);
895 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
896 ir_node *store, int arity, ir_node **in) {
897 return new_rd_Return(NULL, irg, block, store, arity, in);
899 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
900 ir_node *store, ir_node *obj) {
901 return new_rd_Raise(NULL, irg, block, store, obj);
903 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
904 ir_mode *mode, tarval *con) {
905 return new_rd_Const(NULL, irg, block, mode, con);
907 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
908 symconst_symbol value, symconst_kind symkind) {
909 return new_rd_SymConst(NULL, irg, block, value, symkind);
911 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
912 ir_node *objptr, int n_index, ir_node **index,
914 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
916 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
918 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
920 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
921 ir_node *callee, int arity, ir_node **in,
923 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
925 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
926 ir_node *op1, ir_node *op2, ir_mode *mode) {
927 return new_rd_Add(NULL, irg, block, op1, op2, mode);
929 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
930 ir_node *op1, ir_node *op2, ir_mode *mode) {
931 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
933 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
934 ir_node *op, ir_mode *mode) {
935 return new_rd_Minus(NULL, irg, block, op, mode);
937 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
938 ir_node *op1, ir_node *op2, ir_mode *mode) {
939 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
941 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
942 ir_node *memop, ir_node *op1, ir_node *op2) {
943 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
945 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
946 ir_node *memop, ir_node *op1, ir_node *op2) {
947 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
949 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
950 ir_node *memop, ir_node *op1, ir_node *op2) {
951 return new_rd_Div(NULL, irg, block, memop, op1, op2);
953 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
954 ir_node *memop, ir_node *op1, ir_node *op2) {
955 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
957 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
958 ir_node *op, ir_mode *mode) {
959 return new_rd_Abs(NULL, irg, block, op, mode);
961 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
962 ir_node *op1, ir_node *op2, ir_mode *mode) {
963 return new_rd_And(NULL, irg, block, op1, op2, mode);
965 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
966 ir_node *op1, ir_node *op2, ir_mode *mode) {
967 return new_rd_Or(NULL, irg, block, op1, op2, mode);
969 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
970 ir_node *op1, ir_node *op2, ir_mode *mode) {
971 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
973 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
974 ir_node *op, ir_mode *mode) {
975 return new_rd_Not(NULL, irg, block, op, mode);
977 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
978 ir_node *op1, ir_node *op2) {
979 return new_rd_Cmp(NULL, irg, block, op1, op2);
981 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
982 ir_node *op, ir_node *k, ir_mode *mode) {
983 return new_rd_Shl(NULL, irg, block, op, k, mode);
985 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
986 ir_node *op, ir_node *k, ir_mode *mode) {
987 return new_rd_Shr(NULL, irg, block, op, k, mode);
989 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
990 ir_node *op, ir_node *k, ir_mode *mode) {
991 return new_rd_Shrs(NULL, irg, block, op, k, mode);
993 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
994 ir_node *op, ir_node *k, ir_mode *mode) {
995 return new_rd_Rot(NULL, irg, block, op, k, mode);
997 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
998 ir_node *op, ir_mode *mode) {
999 return new_rd_Conv(NULL, irg, block, op, mode);
1001 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1002 return new_rd_Cast(NULL, irg, block, op, to_tp);
1004 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1005 ir_node **in, ir_mode *mode) {
1006 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1008 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1009 ir_node *store, ir_node *adr, ir_mode *mode) {
1010 return new_rd_Load(NULL, irg, block, store, adr, mode);
1012 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1013 ir_node *store, ir_node *adr, ir_node *val) {
1014 return new_rd_Store(NULL, irg, block, store, adr, val);
1016 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1017 ir_node *size, type *alloc_type, where_alloc where) {
1018 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1020 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1021 ir_node *ptr, ir_node *size, type *free_type) {
1022 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1024 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1025 return new_rd_Sync(NULL, irg, block, arity, in);
1027 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1028 ir_mode *mode, long proj) {
1029 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1031 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1033 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1035 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1036 int arity, ir_node **in) {
1037 return new_rd_Tuple(NULL, irg, block, arity, in );
1039 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1040 ir_node *val, ir_mode *mode) {
1041 return new_rd_Id(NULL, irg, block, val, mode);
1043 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1044 return new_rd_Bad(irg);
1046 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1047 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1049 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1050 return new_rd_Unknown(irg, m);
1052 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1053 return new_rd_CallBegin(NULL, irg, block, callee);
1055 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1056 return new_rd_EndReg(NULL, irg, block);
1058 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1059 return new_rd_EndExcept(NULL, irg, block);
1061 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1062 return new_rd_Break(NULL, irg, block);
1064 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1065 ir_mode *mode, long proj) {
1066 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1068 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1069 ir_node *callee, int arity, ir_node **in,
1071 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1075 /** ********************/
1076 /** public interfaces */
1077 /** construction tools */
1081 * - create a new Start node in the current block
1083 * @return s - pointer to the created Start node
1088 new_d_Start (dbg_info* db)
1092 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1093 op_Start, mode_T, 0, NULL);
1094 /* res->attr.start.irg = current_ir_graph; */
1096 res = optimize_node(res);
1097 IRN_VRFY_IRG(res, current_ir_graph);
1102 new_d_End (dbg_info* db)
1105 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1106 op_End, mode_X, -1, NULL);
1107 res = optimize_node(res);
1108 IRN_VRFY_IRG(res, current_ir_graph);
1113 /* Constructs a Block with a fixed number of predecessors.
1114 Does set current_block. Can be used with automatic Phi
1115 node construction. */
1117 new_d_Block (dbg_info* db, int arity, ir_node **in)
1121 bool has_unknown = false;
1123 res = new_rd_Block(db, current_ir_graph, arity, in);
1125 /* Create and initialize array for Phi-node construction. */
1126 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1127 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1128 current_ir_graph->n_loc);
1129 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1132 for (i = arity-1; i >= 0; i--)
1133 if (get_irn_op(in[i]) == op_Unknown) {
1138 if (!has_unknown) res = optimize_node(res);
1139 current_ir_graph->current_block = res;
1141 IRN_VRFY_IRG(res, current_ir_graph);
1146 /* ***********************************************************************/
1147 /* Methods necessary for automatic Phi node creation */
1149 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1150 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1151 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1152 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1154 Call Graph: ( A ---> B == A "calls" B)
1156 get_value mature_immBlock
1164 get_r_value_internal |
1168 new_rd_Phi0 new_rd_Phi_in
1170 * *************************************************************************** */
1172 /** Creates a Phi node with 0 predecessors */
1173 static INLINE ir_node *
1174 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1178 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1179 IRN_VRFY_IRG(res, irg);
1183 /* There are two implementations of the Phi node construction. The first
1184 is faster, but does not work for blocks with more than 2 predecessors.
1185 The second works always but is slower and causes more unnecessary Phi
1187 Select the implementations by the following preprocessor flag set in
1189 #if USE_FAST_PHI_CONSTRUCTION
1191 /* This is a stack used for allocating and deallocating nodes in
1192 new_rd_Phi_in. The original implementation used the obstack
1193 to model this stack, now it is explicit. This reduces side effects.
1195 #if USE_EXPLICIT_PHI_IN_STACK
1196 INLINE Phi_in_stack *
1197 new_Phi_in_stack(void) {
1200 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1202 res->stack = NEW_ARR_F (ir_node *, 0);
1209 free_Phi_in_stack(Phi_in_stack *s) {
1210 DEL_ARR_F(s->stack);
1214 free_to_Phi_in_stack(ir_node *phi) {
1215 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1216 current_ir_graph->Phi_in_stack->pos)
1217 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1219 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1221 (current_ir_graph->Phi_in_stack->pos)++;
1224 static INLINE ir_node *
1225 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1226 int arity, ir_node **in) {
1228 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1229 int pos = current_ir_graph->Phi_in_stack->pos;
1233 /* We need to allocate a new node */
1234 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1235 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1237 /* reuse the old node and initialize it again. */
1240 assert (res->kind == k_ir_node);
1241 assert (res->op == op_Phi);
1245 assert (arity >= 0);
1246 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1247 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1249 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1251 (current_ir_graph->Phi_in_stack->pos)--;
1255 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1257 /* Creates a Phi node with a given, fixed array **in of predecessors.
1258 If the Phi node is unnecessary, as the same value reaches the block
1259 through all control flow paths, it is eliminated and the value
1260 returned directly. This constructor is only intended for use in
1261 the automatic Phi node generation triggered by get_value or mature.
1262 The implementation is quite tricky and depends on the fact, that
1263 the nodes are allocated on a stack:
1264 The in array contains predecessors and NULLs. The NULLs appear,
1265 if get_r_value_internal, that computed the predecessors, reached
1266 the same block on two paths. In this case the same value reaches
1267 this block on both paths, there is no definition in between. We need
1268 not allocate a Phi where these paths merge, but we have to communicate
1269 this fact to the caller. This happens by returning a pointer to the
1270 node the caller _will_ allocate. (Yes, we predict the address. We can
1271 do so because the nodes are allocated on the obstack.) The caller then
1272 finds a pointer to itself and, when this routine is called again,
/* Core helper of automatic Phi construction: builds a Phi from an
   in-array that may contain NULLs (same value reached the block twice)
   and eliminates the Phi when it has at most one real predecessor.
   NOTE(review): sampled view — the NULL-replacement loop body, the
   single-predecessor return paths and #else/#endif lines are missing. */
1275 static INLINE ir_node *
1276 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1279 ir_node *res, *known;
1281 /* Allocate a new node on the obstack. This can return a node to
1282 which some of the pointers in the in-array already point.
1283 Attention: the constructor copies the in array, i.e., the later
1284 changes to the array in this routine do not affect the
1285 constructed node! If the in array contains NULLs, there will be
1286 missing predecessors in the returned node. Is this a possible
1287 internal state of the Phi node generation? */
1288 #if USE_EXPLICIT_PHI_IN_STACK
1289 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1291 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1292 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1295 /* The in-array can contain NULLs. These were returned by
1296 get_r_value_internal if it reached the same block/definition on a
1297 second path. The NULLs are replaced by the node itself to
1298 simplify the test in the next loop. */
1299 for (i = 0; i < ins; ++i) {
1304 /* This loop checks whether the Phi has more than one predecessor.
1305 If so, it is a real Phi node and we break the loop. Else the Phi
1306 node merges the same definition on several paths and therefore is
1308 for (i = 0; i < ins; ++i)
1310 if (in[i] == res || in[i] == known) continue;
1318 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Redundant Phi: give the freshly allocated node back — either to the
   explicit Phi-in stack or directly to the obstack. */
1320 #if USE_EXPLICIT_PHI_IN_STACK
1321 free_to_Phi_in_stack(res);
1323 obstack_free (current_ir_graph->obst, res);
1327 res = optimize_node (res);
1328 IRN_VRFY_IRG(res, irg);
1331 /* return the pointer to the Phi node. This node might be deallocated! */
1336 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1339 allocates and returns this node. The routine called to allocate the
1340 node might optimize it away and return a real value, or even a pointer
1341 to a deallocated Phi node on top of the obstack!
1342 This function is called with an in-array of proper size. **/
1344 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1346 ir_node *prevBlock, *res;
1349 /* This loop goes to all predecessor blocks of the block the Phi node is in
1350 and there finds the operands of the Phi node by calling
1351 get_r_value_internal. */
1352 for (i = 1; i <= ins; ++i) {
1353 assert (block->in[i]);
1354 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1356 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1359 /* After collecting all predecessors into the array nin a new Phi node
1360 with these predecessors is created. This constructor contains an
1361 optimization: If all predecessors of the Phi node are identical it
1362 returns the only operand instead of a new Phi node. If the value
1363 passes two different control flow edges without being defined, and
1364 this is the second path treated, a pointer to the node that will be
1365 allocated for the first path (recursion) is returned. We already
1366 know the address of this node, as it is the next node to be allocated
1367 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1368 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1370 /* Now we now the value for "pos" and can enter it in the array with
1371 all known local variables. Attention: this might be a pointer to
1372 a node, that later will be allocated!!! See new_rd_Phi_in.
1373 If this is called in mature, after some set_value in the same block,
1374 the proper value must not be overwritten:
1376 get_value (makes Phi0, put's it into graph_arr)
1377 set_value (overwrites Phi0 in graph_arr)
1378 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1381 if (!block->attr.block.graph_arr[pos]) {
1382 block->attr.block.graph_arr[pos] = res;
1384 /* printf(" value already computed by %s\n",
1385 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1391 /* This function returns the last definition of a variable. In case
1392 this variable was last defined in a previous block, Phi nodes are
1393 inserted. If the part of the firm graph containing the definition
1394 is not yet constructed, a dummy Phi node is returned. */
1396 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1399 /* There are 4 cases to treat.
1401 1. The block is not mature and we visit it the first time. We can not
1402 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1403 predecessors is returned. This node is added to the linked list (field
1404 "link") of the containing block to be completed when this block is
1405 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1408 2. The value is already known in this block, graph_arr[pos] is set and we
1409 visit the block the first time. We can return the value without
1410 creating any new nodes.
1412 3. The block is mature and we visit it the first time. A Phi node needs
1413 to be created (phi_merge). If the Phi is not needed, as all it's
1414 operands are the same value reaching the block through different
1415 paths, it's optimized away and the value itself is returned.
1417 4. The block is mature, and we visit it the second time. Now two
1418 subcases are possible:
1419 * The value was computed completely the last time we were here. This
1420 is the case if there is no loop. We can return the proper value.
1421 * The recursion that visited this node and set the flag did not
1422 return yet. We are computing a value in a loop and need to
1423 break the recursion without knowing the result yet.
1424 @@@ strange case. Straight forward we would create a Phi before
1425 starting the computation of it's predecessors. In this case we will
1426 find a Phi here in any case. The problem is that this implementation
1427 only creates a Phi after computing the predecessors, so that it is
1428 hard to compute self references of this Phi. @@@
1429 There is no simple check for the second subcase. Therefore we check
1430 for a second visit and treat all such cases as the second subcase.
1431 Anyways, the basic situation is the same: we reached a block
1432 on two paths without finding a definition of the value: No Phi
1433 nodes are needed on both paths.
1434 We return this information "Two paths, no Phi needed" by a very tricky
1435 implementation that relies on the fact that an obstack is a stack and
1436 will return a node with the same address on different allocations.
1437 Look also at phi_merge and new_rd_phi_in to understand this.
1438 @@@ Unfortunately this does not work, see testprogram
1439 three_cfpred_example.
1443 /* case 4 -- already visited. */
1444 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1446 /* visited the first time */
1447 set_irn_visited(block, get_irg_visited(current_ir_graph));
1449 /* Get the local valid value */
1450 res = block->attr.block.graph_arr[pos];
1452 /* case 2 -- If the value is actually computed, return it. */
1453 if (res) return res;
1455 if (block->attr.block.matured) { /* case 3 */
1457 /* The Phi has the same amount of ins as the corresponding block. */
1458 int ins = get_irn_arity(block);
1460 NEW_ARR_A (ir_node *, nin, ins);
1462 /* Phi merge collects the predecessors and then creates a node. */
1463 res = phi_merge (block, pos, mode, nin, ins);
1465 } else { /* case 1 */
1466 /* The block is not mature, we don't know how many in's are needed. A Phi
1467 with zero predecessors is created. Such a Phi node is called Phi0
1468 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1469 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1471 The Phi0 has to remember the pos of it's internal value. If the real
1472 Phi is computed, pos is used to update the array with the local
1475 res = new_rd_Phi0 (current_ir_graph, block, mode);
1476 res->attr.phi0_pos = pos;
1477 res->link = block->link;
1481 /* If we get here, the frontend missed a use-before-definition error */
1484 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1485 assert (mode->code >= irm_F && mode->code <= irm_P);
1486 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1487 tarval_mode_null[mode->code]);
1490 /* The local valid value is available now. */
1491 block->attr.block.graph_arr[pos] = res;
1499 it starts the recursion. This causes an Id at the entry of
1500 every block that has no definition of the value! **/
1502 #if USE_EXPLICIT_PHI_IN_STACK
1504 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1505 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1508 static INLINE ir_node *
1509 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1510 ir_node **in, int ins, ir_node *phi0)
1513 ir_node *res, *known;
1515 /* Allocate a new node on the obstack. The allocation copies the in
1517 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1518 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1520 /* This loop checks whether the Phi has more than one predecessor.
1521 If so, it is a real Phi node and we break the loop. Else the
1522 Phi node merges the same definition on several paths and therefore
1523 is not needed. Don't consider Bad nodes! */
1525 for (i=0; i < ins; ++i)
1529 in[i] = skip_Id(in[i]); /* increasses the number of freed Phis. */
1531 /* Optimize self referencing Phis: We can't detect them yet properly, as
1532 they still refer to the Phi0 they will replace. So replace right now. */
1533 if (phi0 && in[i] == phi0) in[i] = res;
1535 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1543 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1546 obstack_free (current_ir_graph->obst, res);
1547 if (is_Phi(known)) {
1548 /* If pred is a phi node we want to optmize it: If loops are matured in a bad
1549 order, an enclosing Phi know may get superfluous. */
1550 res = optimize_in_place_2(known);
1551 if (res != known) { exchange(known, res); }
1556 /* A undefined value, e.g., in unreachable code. */
1560 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1561 IRN_VRFY_IRG(res, irg);
1562 /* Memory Phis in endless loops must be kept alive.
1563 As we can't distinguish these easily we keep all of them alive. */
1564 if ((res->op == op_Phi) && (mode == mode_M))
1565 add_End_keepalive(irg->end, res);
1572 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1574 #if PRECISE_EXC_CONTEXT
1576 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1578 /* Construct a new frag_array for node n.
1579 Copy the content from the current graph_arr of the corresponding block:
1580 this is the current state.
1581 Set ProjM(n) as current memory state.
1582 Further the last entry in frag_arr of current block points to n. This
1583 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1585 static INLINE ir_node ** new_frag_arr (ir_node *n)
1590 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1591 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1592 sizeof(ir_node *)*current_ir_graph->n_loc);
1594 /* turn off optimization before allocating Proj nodes, as res isn't
1596 opt = get_opt_optimize(); set_optimize(0);
1597 /* Here we rely on the fact that all frag ops have Memory as first result! */
1598 if (get_irn_op(n) == op_Call)
1599 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1601 assert((pn_Quot_M == pn_DivMod_M) &&
1602 (pn_Quot_M == pn_Div_M) &&
1603 (pn_Quot_M == pn_Mod_M) &&
1604 (pn_Quot_M == pn_Load_M) &&
1605 (pn_Quot_M == pn_Store_M) &&
1606 (pn_Quot_M == pn_Alloc_M) );
1607 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1611 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1616 * returns the frag_arr from a node
1618 static INLINE ir_node **
1619 get_frag_arr (ir_node *n) {
1620 switch (get_irn_opcode(n)) {
1622 return n->attr.call.exc.frag_arr;
1624 return n->attr.a.exc.frag_arr;
1626 return n->attr.load.exc.frag_arr;
1628 return n->attr.store.exc.frag_arr;
1630 return n->attr.except.frag_arr;
1635 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1637 if (!frag_arr[pos]) frag_arr[pos] = val;
1638 if (frag_arr[current_ir_graph->n_loc - 1]) {
1639 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1640 assert(arr != frag_arr && "Endless recursion detected");
1641 set_frag_value(arr, pos, val);
1646 for (i = 0; i < 1000; ++i) {
1647 if (!frag_arr[pos]) {
1648 frag_arr[pos] = val;
1650 if (frag_arr[current_ir_graph->n_loc - 1]) {
1651 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1657 assert(0 && "potential endless recursion");
1662 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1666 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1668 frag_arr = get_frag_arr(cfOp);
1669 res = frag_arr[pos];
1671 if (block->attr.block.graph_arr[pos]) {
1672 /* There was a set_value after the cfOp and no get_value before that
1673 set_value. We must build a Phi node now. */
1674 if (block->attr.block.matured) {
1675 int ins = get_irn_arity(block);
1677 NEW_ARR_A (ir_node *, nin, ins);
1678 res = phi_merge(block, pos, mode, nin, ins);
1680 res = new_rd_Phi0 (current_ir_graph, block, mode);
1681 res->attr.phi0_pos = pos;
1682 res->link = block->link;
1686 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1687 but this should be better: (remove comment if this works) */
1688 /* It's a Phi, we can write this into all graph_arrs with NULL */
1689 set_frag_value(block->attr.block.graph_arr, pos, res);
1691 res = get_r_value_internal(block, pos, mode);
1692 set_frag_value(block->attr.block.graph_arr, pos, res);
1700 computes the predecessors for the real phi node, and then
1701 allocates and returns this node. The routine called to allocate the
1702 node might optimize it away and return a real value.
1703 This function must be called with an in-array of proper size. **/
1705 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1707 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1710 /* If this block has no value at pos create a Phi0 and remember it
1711 in graph_arr to break recursions.
1712 Else we may not set graph_arr as there a later value is remembered. */
1714 if (!block->attr.block.graph_arr[pos]) {
1715 if (block == get_irg_start_block(current_ir_graph)) {
1716 /* Collapsing to Bad tarvals is no good idea.
1717 So we call a user-supplied routine here that deals with this case as
1718 appropriate for the given language. Sorryly the only help we can give
1719 here is the position.
1721 Even if all variables are defined before use, it can happen that
1722 we get to the start block, if a cond has been replaced by a tuple
1723 (bad, jmp). In this case we call the function needlessly, eventually
1724 generating an non existant error.
1725 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1728 if (default_initialize_local_variable)
1729 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1731 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1732 /* We don't need to care about exception ops in the start block.
1733 There are none by definition. */
1734 return block->attr.block.graph_arr[pos];
1736 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1737 block->attr.block.graph_arr[pos] = phi0;
1738 #if PRECISE_EXC_CONTEXT
1739 if (get_opt_precise_exc_context()) {
1740 /* Set graph_arr for fragile ops. Also here we should break recursion.
1741 We could choose a cyclic path through an cfop. But the recursion would
1742 break at some point. */
1743 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1749 /* This loop goes to all predecessor blocks of the block the Phi node
1750 is in and there finds the operands of the Phi node by calling
1751 get_r_value_internal. */
1752 for (i = 1; i <= ins; ++i) {
1753 prevCfOp = skip_Proj(block->in[i]);
1755 if (is_Bad(prevCfOp)) {
1756 /* In case a Cond has been optimized we would get right to the start block
1757 with an invalid definition. */
1758 nin[i-1] = new_Bad();
1761 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1763 if (!is_Bad(prevBlock)) {
1764 #if PRECISE_EXC_CONTEXT
1765 if (get_opt_precise_exc_context() &&
1766 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1767 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1768 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1771 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1773 nin[i-1] = new_Bad();
1777 /* We want to pass the Phi0 node to the constructor: this finds additional
1778 optimization possibilities.
1779 The Phi0 node either is allocated in this function, or it comes from
1780 a former call to get_r_value_internal. In this case we may not yet
1781 exchange phi0, as this is done in mature_immBlock. */
1783 phi0_all = block->attr.block.graph_arr[pos];
1784 if (!((get_irn_op(phi0_all) == op_Phi) &&
1785 (get_irn_arity(phi0_all) == 0) &&
1786 (get_nodes_block(phi0_all) == block)))
1792 /* After collecting all predecessors into the array nin a new Phi node
1793 with these predecessors is created. This constructor contains an
1794 optimization: If all predecessors of the Phi node are identical it
1795 returns the only operand instead of a new Phi node. */
1796 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1798 /* In case we allocated a Phi0 node at the beginning of this procedure,
1799 we need to exchange this Phi0 with the real Phi. */
1801 exchange(phi0, res);
1802 block->attr.block.graph_arr[pos] = res;
1803 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1804 only an optimization. */
1810 /* This function returns the last definition of a variable. In case
1811 this variable was last defined in a previous block, Phi nodes are
1812 inserted. If the part of the firm graph containing the definition
1813 is not yet constructed, a dummy Phi node is returned. */
1815 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1818 /* There are 4 cases to treat.
1820 1. The block is not mature and we visit it the first time. We can not
1821 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1822 predecessors is returned. This node is added to the linked list (field
1823 "link") of the containing block to be completed when this block is
1824 matured. (Comlpletion will add a new Phi and turn the Phi0 into an Id
1827 2. The value is already known in this block, graph_arr[pos] is set and we
1828 visit the block the first time. We can return the value without
1829 creating any new nodes.
1831 3. The block is mature and we visit it the first time. A Phi node needs
1832 to be created (phi_merge). If the Phi is not needed, as all it's
1833 operands are the same value reaching the block through different
1834 paths, it's optimized away and the value itself is returned.
1836 4. The block is mature, and we visit it the second time. Now two
1837 subcases are possible:
1838 * The value was computed completely the last time we were here. This
1839 is the case if there is no loop. We can return the proper value.
1840 * The recursion that visited this node and set the flag did not
1841 return yet. We are computing a value in a loop and need to
1842 break the recursion. This case only happens if we visited
1843 the same block with phi_merge before, which inserted a Phi0.
1844 So we return the Phi0.
1847 /* case 4 -- already visited. */
1848 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1849 /* As phi_merge allocates a Phi0 this value is always defined. Here
1850 is the critical difference of the two algorithms. */
1851 assert(block->attr.block.graph_arr[pos]);
1852 return block->attr.block.graph_arr[pos];
1855 /* visited the first time */
1856 set_irn_visited(block, get_irg_visited(current_ir_graph));
1858 /* Get the local valid value */
1859 res = block->attr.block.graph_arr[pos];
1861 /* case 2 -- If the value is actually computed, return it. */
1862 if (res) { return res; };
1864 if (block->attr.block.matured) { /* case 3 */
1866 /* The Phi has the same amount of ins as the corresponding block. */
1867 int ins = get_irn_arity(block);
1869 NEW_ARR_A (ir_node *, nin, ins);
1871 /* Phi merge collects the predecessors and then creates a node. */
1872 res = phi_merge (block, pos, mode, nin, ins);
1874 } else { /* case 1 */
1875 /* The block is not mature, we don't know how many in's are needed. A Phi
1876 with zero predecessors is created. Such a Phi node is called Phi0
1877 node. The Phi0 is then added to the list of Phi0 nodes in this block
1878 to be matured by mature_immBlock later.
1879 The Phi0 has to remember the pos of it's internal value. If the real
1880 Phi is computed, pos is used to update the array with the local
1882 res = new_rd_Phi0 (current_ir_graph, block, mode);
1883 res->attr.phi0_pos = pos;
1884 res->link = block->link;
1888 /* If we get here, the frontend missed a use-before-definition error */
1891 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1892 assert (mode->code >= irm_F && mode->code <= irm_P);
1893 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1894 get_mode_null(mode));
1897 /* The local valid value is available now. */
1898 block->attr.block.graph_arr[pos] = res;
1903 #endif /* USE_FAST_PHI_CONSTRUCTION */
1905 /* ************************************************************************** */
1907 /** Finalize a Block node, when all control flows are known. */
1908 /** Acceptable parameters are only Block nodes. */
1910 mature_immBlock (ir_node *block)
1917 assert (get_irn_opcode(block) == iro_Block);
1918 /* @@@ should be commented in
1919 assert (!get_Block_matured(block) && "Block already matured"); */
1921 if (!get_Block_matured(block)) {
1922 ins = ARR_LEN (block->in)-1;
1923 /* Fix block parameters */
1924 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1926 /* An array for building the Phi nodes. */
1927 NEW_ARR_A (ir_node *, nin, ins);
1929 /* Traverse a chain of Phi nodes attached to this block and mature
1931 for (n = block->link; n; n=next) {
1932 inc_irg_visited(current_ir_graph);
1934 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1937 block->attr.block.matured = 1;
1939 /* Now, as the block is a finished firm node, we can optimize it.
1940 Since other nodes have been allocated since the block was created
1941 we can not free the node on the obstack. Therefore we have to call
1943 Unfortunately the optimization does not change a lot, as all allocated
1944 nodes refer to the unoptimized node.
1945 We can call _2, as global cse has no effect on blocks. */
1946 block = optimize_in_place_2(block);
1947 IRN_VRFY_IRG(block, current_ir_graph);
1952 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1954 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1959 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1961 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1966 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1968 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1974 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1976 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1981 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1983 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
1988 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1991 assert(arg->op == op_Cond);
1992 arg->attr.c.kind = fragmentary;
1993 arg->attr.c.default_proj = max_proj;
1994 res = new_Proj (arg, mode_X, max_proj);
1999 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2001 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2006 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2008 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2012 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2014 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2019 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2021 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2026 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2028 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2034 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2036 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2041 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2043 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2048 * allocate the frag array
2050 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2051 if (get_opt_precise_exc_context()) {
2052 if ((current_ir_graph->phase_state == phase_building) &&
2053 (get_irn_op(res) == op) && /* Could be optimized away. */
2054 !*frag_store) /* Could be a cse where the arr is already set. */ {
2055 *frag_store = new_frag_arr(res);
2062 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2065 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2067 res->attr.except.pin_state = op_pin_state_pinned;
2068 #if PRECISE_EXC_CONTEXT
2069 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2076 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2079 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2081 res->attr.except.pin_state = op_pin_state_pinned;
2082 #if PRECISE_EXC_CONTEXT
2083 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2090 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2093 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2095 res->attr.except.pin_state = op_pin_state_pinned;
2096 #if PRECISE_EXC_CONTEXT
2097 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2104 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2107 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2109 res->attr.except.pin_state = op_pin_state_pinned;
2110 #if PRECISE_EXC_CONTEXT
2111 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2118 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2120 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2125 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2127 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2132 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2134 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2139 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2141 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2146 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2148 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2153 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2155 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2160 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2162 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2167 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2169 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2174 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2176 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2181 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2183 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2188 new_d_Jmp (dbg_info* db)
2190 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2194 new_d_Cond (dbg_info* db, ir_node *c)
2196 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2200 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2204 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2205 store, callee, arity, in, tp);
2206 #if PRECISE_EXC_CONTEXT
2207 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2214 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2216 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2221 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2223 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2228 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2231 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2233 #if PRECISE_EXC_CONTEXT
2234 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2241 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2244 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2246 #if PRECISE_EXC_CONTEXT
2247 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2254 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2258 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2259 store, size, alloc_type, where);
2260 #if PRECISE_EXC_CONTEXT
2261 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2268 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2270 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2271 store, ptr, size, free_type);
2275 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2276 /* GL: objptr was called frame before. Frame was a bad choice for the name
2277 as the operand could as well be a pointer to a dynamic object. */
2279 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2280 store, objptr, 0, NULL, ent);
2284 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2286 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2287 store, objptr, n_index, index, sel);
2291 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2293 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2294 store, objptr, ent));
2298 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2300 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2305 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2307 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2312 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2314 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2322 return __new_d_Bad();
2326 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2328 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2333 new_d_Unknown (ir_mode *m)
2335 return new_rd_Unknown(current_ir_graph, m);
2339 new_d_CallBegin (dbg_info *db, ir_node *call)
2342 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2347 new_d_EndReg (dbg_info *db)
2350 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2355 new_d_EndExcept (dbg_info *db)
2358 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2363 new_d_Break (dbg_info *db)
2365 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2369 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2371 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2376 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2380 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2381 callee, arity, in, tp);
2386 /* ********************************************************************* */
2387 /* Comfortable interface with automatic Phi node construction. */
2388 /* (Uses also constructors of ?? interface, except new_Block. */
2389 /* ********************************************************************* */
2391 /* * Block construction **/
2392 /* immature Block without predecessors */
2393 ir_node *new_d_immBlock (dbg_info* db) {
2396 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2397 /* creates a new dynamic in-array as length of in is -1 */
2398 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2399 current_ir_graph->current_block = res;
2400 res->attr.block.matured = 0;
2401 /* res->attr.block.exc = exc_normal; */
2402 /* res->attr.block.handler_entry = 0; */
2403 res->attr.block.irg = current_ir_graph;
2404 res->attr.block.backedge = NULL;
2405 res->attr.block.in_cg = NULL;
2406 res->attr.block.cg_backedge = NULL;
2407 set_Block_block_visited(res, 0);
2409 /* Create and initialize array for Phi-node construction. */
2410 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2411 current_ir_graph->n_loc);
2412 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2414 /* Immature block may not be optimized! */
2415 IRN_VRFY_IRG(res, current_ir_graph);
2421 new_immBlock (void) {
2422 return new_d_immBlock(NULL);
2425 /* add an adge to a jmp/control flow node */
2427 add_immBlock_pred (ir_node *block, ir_node *jmp)
2429 if (block->attr.block.matured) {
2430 assert(0 && "Error: Block already matured!\n");
2433 assert(jmp != NULL);
2434 ARR_APP1(ir_node *, block->in, jmp);
2438 /* changing the current block */
2440 set_cur_block (ir_node *target)
2442 current_ir_graph->current_block = target;
2445 /* ************************ */
2446 /* parameter administration */
2448 /* get a value from the parameter array from the current block by its index */
2450 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2452 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2453 inc_irg_visited(current_ir_graph);
2455 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2457 /* get a value from the parameter array from the current block by its index */
2459 get_value (int pos, ir_mode *mode)
2461 return get_d_value(NULL, pos, mode);
2464 /* set a value at position pos in the parameter array from the current block */
2466 set_value (int pos, ir_node *value)
2468 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2469 assert(pos+1 < current_ir_graph->n_loc);
2470 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2473 /* get the current store */
2477 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2478 /* GL: one could call get_value instead */
2479 inc_irg_visited(current_ir_graph);
2480 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2483 /* set the current store */
2485 set_store (ir_node *store)
2487 /* GL: one could call set_value instead */
2488 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2489 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2493 keep_alive (ir_node *ka)
2495 add_End_keepalive(current_ir_graph->end, ka);
2498 /** Useful access routines **/
2499 /* Returns the current block of the current graph. To set the current
2500 block use set_cur_block. */
2501 ir_node *get_cur_block() {
2502 return get_irg_current_block(current_ir_graph);
2505 /* Returns the frame type of the current graph */
2506 type *get_cur_frame_type() {
2507 return get_irg_frame_type(current_ir_graph);
2511 /* ********************************************************************* */
2514 /* call once for each run of the library */
2516 init_cons (default_initialize_local_variable_func_t *func)
2518 default_initialize_local_variable = func;
2521 /* call for each graph */
2523 finalize_cons (ir_graph *irg) {
2524 irg->phase_state = phase_high;
2528 ir_node *new_Block(int arity, ir_node **in) {
2529 return new_d_Block(NULL, arity, in);
2531 ir_node *new_Start (void) {
2532 return new_d_Start(NULL);
2534 ir_node *new_End (void) {
2535 return new_d_End(NULL);
2537 ir_node *new_Jmp (void) {
2538 return new_d_Jmp(NULL);
2540 ir_node *new_Cond (ir_node *c) {
2541 return new_d_Cond(NULL, c);
2543 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2544 return new_d_Return(NULL, store, arity, in);
2546 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2547 return new_d_Raise(NULL, store, obj);
2549 ir_node *new_Const (ir_mode *mode, tarval *con) {
2550 return new_d_Const(NULL, mode, con);
2553 ir_node *new_Const_type(tarval *con, type *tp) {
2554 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2557 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2558 return new_d_SymConst(NULL, value, kind);
2560 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2561 return new_d_simpleSel(NULL, store, objptr, ent);
2563 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2565 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2567 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2568 return new_d_InstOf (NULL, store, objptr, ent);
2570 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2572 return new_d_Call(NULL, store, callee, arity, in, tp);
2574 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2575 return new_d_Add(NULL, op1, op2, mode);
2577 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2578 return new_d_Sub(NULL, op1, op2, mode);
2580 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2581 return new_d_Minus(NULL, op, mode);
2583 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2584 return new_d_Mul(NULL, op1, op2, mode);
2586 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2587 return new_d_Quot(NULL, memop, op1, op2);
2589 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2590 return new_d_DivMod(NULL, memop, op1, op2);
2592 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2593 return new_d_Div(NULL, memop, op1, op2);
2595 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2596 return new_d_Mod(NULL, memop, op1, op2);
2598 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2599 return new_d_Abs(NULL, op, mode);
2601 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2602 return new_d_And(NULL, op1, op2, mode);
2604 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2605 return new_d_Or(NULL, op1, op2, mode);
2607 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2608 return new_d_Eor(NULL, op1, op2, mode);
2610 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2611 return new_d_Not(NULL, op, mode);
2613 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2614 return new_d_Shl(NULL, op, k, mode);
2616 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2617 return new_d_Shr(NULL, op, k, mode);
2619 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2620 return new_d_Shrs(NULL, op, k, mode);
2622 #define new_Rotate new_Rot
2623 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2624 return new_d_Rot(NULL, op, k, mode);
2626 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2627 return new_d_Cmp(NULL, op1, op2);
2629 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2630 return new_d_Conv(NULL, op, mode);
2632 ir_node *new_Cast (ir_node *op, type *to_tp) {
2633 return new_d_Cast(NULL, op, to_tp);
2635 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2636 return new_d_Phi(NULL, arity, in, mode);
2638 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2639 return new_d_Load(NULL, store, addr, mode);
2641 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2642 return new_d_Store(NULL, store, addr, val);
2644 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2645 where_alloc where) {
2646 return new_d_Alloc(NULL, store, size, alloc_type, where);
2648 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2650 return new_d_Free(NULL, store, ptr, size, free_type);
2652 ir_node *new_Sync (int arity, ir_node **in) {
2653 return new_d_Sync(NULL, arity, in);
2655 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2656 return new_d_Proj(NULL, arg, mode, proj);
2658 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2659 return new_d_defaultProj(NULL, arg, max_proj);
2661 ir_node *new_Tuple (int arity, ir_node **in) {
2662 return new_d_Tuple(NULL, arity, in);
2664 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2665 return new_d_Id(NULL, val, mode);
2667 ir_node *new_Bad (void) {
2670 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2671 return new_d_Confirm (NULL, val, bound, cmp);
2673 ir_node *new_Unknown(ir_mode *m) {
2674 return new_d_Unknown(m);
2676 ir_node *new_CallBegin (ir_node *callee) {
2677 return new_d_CallBegin(NULL, callee);
2679 ir_node *new_EndReg (void) {
2680 return new_d_EndReg(NULL);
2682 ir_node *new_EndExcept (void) {
2683 return new_d_EndExcept(NULL);
2685 ir_node *new_Break (void) {
2686 return new_d_Break(NULL);
2688 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2689 return new_d_Filter(NULL, arg, mode, proj);
2691 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2692 return new_d_FuncCall(NULL, callee, arity, in, tp);