3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
   Phi_in. Redefinition in irgraph.c!! */
typedef struct Phi_in_stack Phi_in_stack;
/* when we need verifying */
/* NOTE(review): the source is decimated here; these two definitions of
   IRN_VRFY_IRG are presumably the NDEBUG / debug branches of an #ifdef
   whose directives are among the missing lines — verify against the
   full file. */
# define IRN_VRFY_IRG(res, irg)
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
/*
 * language dependent initialization variable
 */
/* Hook a frontend may set to supply default values for uninitialized
   local variables; NULL means no frontend-specific initialization. */
static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
/* -------------------------------------------- */
/* private interfaces, for professional use only */
/* -------------------------------------------- */
67 /* Constructs a Block with a fixed number of predecessors.
68 Does not set current_block. Can not be used with automatic
69 Phi node construction. */
/* NOTE(review): source lines are missing throughout this file
   (declarations, braces, return statements) — comments only, code
   untouched. */
71 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A freshly built raw Block is immediately mature and starts with a
   cleared visited counter. */
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
/* Backedge flags sized to the predecessor count; interprocedural
   (call-graph) predecessor info starts out unset. */
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
87 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (a mode_T tuple of the procedure's initial
   values) in the given block. */
92 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
96 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
97 /* res->attr.start.irg = irg; */
99 IRN_VRFY_IRG(res, irg);
/* Constructs an End node.  Arity -1 requests a dynamic in-array so that
   keep-alive edges can be appended later. */
104 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
108 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
110 IRN_VRFY_IRG(res, irg);
114 /* Creates a Phi node with all predecessors. Calling this constructor
115 is only allowed if the corresponding block is mature. */
117 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
121 bool has_unknown = false;
123 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block already matured, its arity must match the Phi's arity. */
125 if ( get_Block_matured(block) )
126 assert( get_irn_arity(block) == arity );
128 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
130 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown nodes; an Unknown input suppresses the
   optimize_node call below (NOTE(review): loop-body lines are missing
   from this chunk — presumably they set has_unknown). */
132 for (i = arity-1; i >= 0; i--)
133 if (get_irn_op(in[i]) == op_Unknown) {
138 if (!has_unknown) res = optimize_node (res);
139 IRN_VRFY_IRG(res, irg);
141 /* Memory Phis in endless loops must be kept alive.
142 As we can't distinguish these easily we keep all of them alive. */
143 if ((res->op == op_Phi) && (mode == mode_M))
144 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval 'con' with an attached type.
   Note: the node is placed in the graph's start block, not in the
   'block' argument — presumably intentional (constants are
   block-independent); the parameter is kept for interface symmetry. */
149 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
153 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
154 res->attr.con.tv = con;
155 set_Const_type(res, tp); /* Call method because of complex assertion. */
156 res = optimize_node (res);
157 assert(get_Const_type(res) == tp);
158 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const with the unknown type. */
164 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
166 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Constructs an Id node (identity; usually optimized away immediately
   by optimize_node). */
170 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
174 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
175 res = optimize_node(res);
176 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting result 'proj' from a mode_T node. */
181 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
186 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
187 res->attr.proj = proj;
190 assert(get_Proj_pred(res));
191 assert(get_nodes_block(get_Proj_pred(res)));
193 res = optimize_node(res);
195 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond.  Side effect: mutates the Cond,
   marking it fragmentary and recording max_proj as its default proj. */
201 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
205 assert(arg->op == op_Cond);
206 arg->attr.c.kind = fragmentary;
207 arg->attr.c.default_proj = max_proj;
208 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting 'op' to mode 'mode'. */
213 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
217 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
218 res = optimize_node(res);
219 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast: changes the type attribute only, the mode is taken
   over from the operand unchanged.  Target type must be atomic. */
224 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
228 assert(is_atomic_type(to_tp));
230 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
231 res->attr.cast.totype = to_tp;
232 res = optimize_node(res);
233 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple node (mode_T) from 'arity' operands. */
238 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
242 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
243 res = optimize_node (res);
244 IRN_VRFY_IRG(res, irg);
/* ---- Arithmetic and logic constructors ----------------------------
   All follow the same scheme: build the node, run the local optimizer,
   verify.  Binary ops collect their operands in a local 'in' array
   (declarations are among the missing source lines).  Quot/DivMod/
   Div/Mod take a memory operand and produce a mode_T tuple since they
   may raise an exception. */
249 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
250 ir_node *op1, ir_node *op2, ir_mode *mode)
257 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
258 res = optimize_node(res);
259 IRN_VRFY_IRG(res, irg);
264 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
265 ir_node *op1, ir_node *op2, ir_mode *mode)
272 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
273 res = optimize_node (res);
274 IRN_VRFY_IRG(res, irg);
279 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
280 ir_node *op, ir_mode *mode)
284 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
285 res = optimize_node(res);
286 IRN_VRFY_IRG(res, irg);
291 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
292 ir_node *op1, ir_node *op2, ir_mode *mode)
299 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
300 res = optimize_node(res);
301 IRN_VRFY_IRG(res, irg);
/* Division family: in[] = { memop, op1, op2 }; result is a tuple. */
306 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
307 ir_node *memop, ir_node *op1, ir_node *op2)
315 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
316 res = optimize_node(res);
317 IRN_VRFY_IRG(res, irg);
322 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
323 ir_node *memop, ir_node *op1, ir_node *op2)
331 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
332 res = optimize_node(res);
333 IRN_VRFY_IRG(res, irg);
338 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
339 ir_node *memop, ir_node *op1, ir_node *op2)
347 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
354 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
355 ir_node *memop, ir_node *op1, ir_node *op2)
363 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
371 ir_node *op1, ir_node *op2, ir_mode *mode)
378 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
379 res = optimize_node(res);
380 IRN_VRFY_IRG(res, irg);
385 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
386 ir_node *op1, ir_node *op2, ir_mode *mode)
393 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
394 res = optimize_node(res);
395 IRN_VRFY_IRG(res, irg);
400 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op1, ir_node *op2, ir_mode *mode)
408 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
409 res = optimize_node (res);
410 IRN_VRFY_IRG(res, irg);
415 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
416 ir_node *op, ir_mode *mode)
420 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
/* Shift/rotate family: in[] = { op, k } where k is the shift amount. */
427 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
428 ir_node *op, ir_node *k, ir_mode *mode)
435 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
436 res = optimize_node(res);
437 IRN_VRFY_IRG(res, irg);
442 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
443 ir_node *op, ir_node *k, ir_mode *mode)
450 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
451 res = optimize_node(res);
452 IRN_VRFY_IRG(res, irg);
457 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *op, ir_node *k, ir_mode *mode)
465 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
466 res = optimize_node(res);
467 IRN_VRFY_IRG(res, irg);
472 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
473 ir_node *op, ir_node *k, ir_mode *mode)
480 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
481 res = optimize_node(res);
482 IRN_VRFY_IRG(res, irg);
487 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
488 ir_node *op, ir_mode *mode)
492 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
493 res = optimize_node (res);
494 IRN_VRFY_IRG(res, irg);
/* Cmp yields a mode_T tuple; the relation is selected via Proj. */
499 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
500 ir_node *op1, ir_node *op2)
507 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X control flow). */
514 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
518 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
519 res = optimize_node (res);
520 IRN_VRFY_IRG (res, irg);
/* Constructs a Cond on selector 'c'.  Starts out as a dense jump table
   with default proj 0; new_rd_defaultProj may later mark it
   fragmentary. */
525 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
529 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
530 res->attr.c.kind = dense;
531 res->attr.c.default_proj = 0;
532 res = optimize_node (res);
533 IRN_VRFY_IRG(res, irg);
/* Constructs a Call.  The real in-array r_in is laid out as
   { store, callee, arg0 .. argN-1 } (r_arity = arity + 2; the lines
   filling slots 0 and 1 are among the missing source lines). */
538 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
539 ir_node *callee, int arity, ir_node **in, type *tp)
546 NEW_ARR_A(ir_node *, r_in, r_arity);
549 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
551 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be the unknown type or a method type. */
553 assert((get_unknown_type() == tp) || is_Method_type(tp));
554 set_Call_type(res, tp);
555 res->attr.call.exc.pin_state = op_pin_state_pinned;
556 res->attr.call.callee_arr = NULL;
557 res = optimize_node(res);
558 IRN_VRFY_IRG(res, irg);
/* Constructs a Return.  r_in = { store, res0 .. resN-1 }. */
563 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
564 ir_node *store, int arity, ir_node **in)
571 NEW_ARR_A (ir_node *, r_in, r_arity);
573 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
574 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
575 res = optimize_node(res);
576 IRN_VRFY_IRG(res, irg);
/* Constructs a Raise (exception throw): in[] = { store, obj }. */
581 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
588 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
/* Constructs a Load: in[] = { store, adr }; result is a mode_T tuple.
   The mode of the loaded value is kept in the load attribute. */
595 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
596 ir_node *store, ir_node *adr, ir_mode *mode)
603 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
604 res->attr.load.exc.pin_state = op_pin_state_pinned;
605 res->attr.load.load_mode = mode;
606 res->attr.load.volatility = volatility_non_volatile;
607 res = optimize_node(res);
608 IRN_VRFY_IRG(res, irg);
/* Constructs a Store: in[] = { store, adr, val }. */
613 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
614 ir_node *store, ir_node *adr, ir_node *val)
622 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
623 res->attr.store.exc.pin_state = op_pin_state_pinned;
624 res->attr.store.volatility = volatility_non_volatile;
625 res = optimize_node(res);
626 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of 'size' bytes of 'alloc_type' on heap or
   stack ('where'); in[] = { store, size }. */
631 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
632 ir_node *size, type *alloc_type, where_alloc where)
639 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
640 res->attr.a.exc.pin_state = op_pin_state_pinned;
641 res->attr.a.where = where;
642 res->attr.a.type = alloc_type;
643 res = optimize_node(res);
644 IRN_VRFY_IRG(res, irg);
/* Constructs a Free: in[] = { store, ptr, size }. */
649 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
650 ir_node *ptr, ir_node *size, type *free_type)
658 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
659 res->attr.f = free_type;
660 res = optimize_node(res);
661 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel selecting entity 'ent' relative to objptr with
   'arity' index operands.  r_in = { store, objptr, index0 .. }. */
666 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
667 int arity, ir_node **in, entity *ent)
673 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
676 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
679 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
680 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
681 res->attr.s.ent = ent;
682 res = optimize_node(res);
683 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (dynamic type test).
   NOTE(review): the node is built with op_Sel but its InstOf attribute
   (attr.io.ent) is set — this looks like it should be op_InstOf;
   verify against the complete source before changing.  Also note the
   'ent' parameter is a type*, despite its name. */
688 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
689 ir_node *objptr, type *ent)
696 NEW_ARR_A(ir_node *, r_in, r_arity);
700 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
701 res->attr.io.ent = ent;
703 /* res = optimize(res); */
704 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst with an attached type.  Address kinds
   (addr_name/addr_ent) get a pointer mode; the else-branch selecting
   the mode for type_tag/size kinds is among the missing lines. */
709 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
710 symconst_kind symkind, type *tp) {
714 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
719 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
721 res->attr.i.num = symkind;
722 res->attr.i.sym = value;
725 res = optimize_node(res);
726 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: SymConst with the unknown type. */
731 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
732 symconst_kind symkind)
734 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* The (type *) casts below only initialize the first member of the
   symconst_symbol union; the symkind tag disambiguates the payload.
   All four wrappers place the node in the start block. */
738 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
739 symconst_symbol sym = {(type *)symbol};
740 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
743 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
744 symconst_symbol sym = {(type *)symbol};
745 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
748 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
749 symconst_symbol sym = {symbol};
750 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
753 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
754 symconst_symbol sym = {symbol};
755 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Constructs a Sync merging 'arity' memory values into one (mode_M). */
759 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
763 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
764 res = optimize_node(res);
765 IRN_VRFY_IRG(res, irg);
/* Returns the graph's singleton Bad node (body lines missing). */
770 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm asserting that 'val' cmp 'bound' holds;
   in[] = { val, bound }.  Mode is taken over from val. */
776 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
778 ir_node *in[2], *res;
782 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
783 res->attr.confirm_cmp = cmp;
784 res = optimize_node (res);
785 IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown of mode m in the start block; never optimized. */
790 new_rd_Unknown (ir_graph *irg, ir_mode *m)
792 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* ---- Interprocedural-view and miscellaneous constructors ---------- */
/* Constructs a CallBegin for interprocedural representation; keeps a
   back-pointer to the originating Call. */
796 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
801 in[0] = get_Call_ptr(call);
802 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
803 /* res->attr.callbegin.irg = irg; */
804 res->attr.callbegin.call = call;
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
/* Constructs an EndReg (regular interprocedural end; dynamic in-array). */
811 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
815 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
817 IRN_VRFY_IRG(res, irg);
/* Constructs an EndExcept and registers it as the graph's exceptional
   end. */
822 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
826 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
827 irg->end_except = res;
828 IRN_VRFY_IRG (res, irg);
/* Constructs a Break (interprocedural control flow, mode_X). */
833 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
837 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
838 res = optimize_node(res);
839 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter (interprocedural Proj analogue). */
844 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
849 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
850 res->attr.filter.proj = proj;
851 res->attr.filter.in_cg = NULL;
852 res->attr.filter.backedge = NULL;
855 assert(get_Proj_pred(res));
856 assert(get_nodes_block(get_Proj_pred(res)));
858 res = optimize_node(res);
859 IRN_VRFY_IRG(res, irg);
/* Returns the graph's singleton NoMem node (body lines missing). */
864 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux: in[] = { sel, ir_false, ir_true }. */
869 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
870 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
879 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
882 res = optimize_node(res);
883 IRN_VRFY_IRG(res, irg);
/* ---- new_r_* convenience wrappers ---------------------------------
   Each simply forwards to the corresponding new_rd_* constructor with
   NULL debug info.  No further logic. */
888 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
889 return new_rd_Block(NULL, irg, arity, in);
891 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
892 return new_rd_Start(NULL, irg, block);
894 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
895 return new_rd_End(NULL, irg, block);
897 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
898 return new_rd_Jmp(NULL, irg, block);
900 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
901 return new_rd_Cond(NULL, irg, block, c);
903 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
904 ir_node *store, int arity, ir_node **in) {
905 return new_rd_Return(NULL, irg, block, store, arity, in);
907 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
908 ir_node *store, ir_node *obj) {
909 return new_rd_Raise(NULL, irg, block, store, obj);
911 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
912 ir_mode *mode, tarval *con) {
913 return new_rd_Const(NULL, irg, block, mode, con);
915 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
916 symconst_symbol value, symconst_kind symkind) {
917 return new_rd_SymConst(NULL, irg, block, value, symkind);
919 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
920 ir_node *objptr, int n_index, ir_node **index,
922 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
924 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
926 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
928 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
929 ir_node *callee, int arity, ir_node **in,
931 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
933 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
934 ir_node *op1, ir_node *op2, ir_mode *mode) {
935 return new_rd_Add(NULL, irg, block, op1, op2, mode);
937 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
938 ir_node *op1, ir_node *op2, ir_mode *mode) {
939 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
941 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
942 ir_node *op, ir_mode *mode) {
943 return new_rd_Minus(NULL, irg, block, op, mode);
945 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
946 ir_node *op1, ir_node *op2, ir_mode *mode) {
947 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
949 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
950 ir_node *memop, ir_node *op1, ir_node *op2) {
951 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
953 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
954 ir_node *memop, ir_node *op1, ir_node *op2) {
955 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
957 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
958 ir_node *memop, ir_node *op1, ir_node *op2) {
959 return new_rd_Div(NULL, irg, block, memop, op1, op2);
961 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
962 ir_node *memop, ir_node *op1, ir_node *op2) {
963 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
965 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
966 ir_node *op, ir_mode *mode) {
967 return new_rd_Abs(NULL, irg, block, op, mode);
969 ir_node *new_r_And (ir_graph *irg, ir_node *block,
970 ir_node *op1, ir_node *op2, ir_mode *mode) {
971 return new_rd_And(NULL, irg, block, op1, op2, mode);
973 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
974 ir_node *op1, ir_node *op2, ir_mode *mode) {
975 return new_rd_Or(NULL, irg, block, op1, op2, mode);
977 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
978 ir_node *op1, ir_node *op2, ir_mode *mode) {
979 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
981 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
982 ir_node *op, ir_mode *mode) {
983 return new_rd_Not(NULL, irg, block, op, mode);
985 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
986 ir_node *op1, ir_node *op2) {
987 return new_rd_Cmp(NULL, irg, block, op1, op2);
989 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
990 ir_node *op, ir_node *k, ir_mode *mode) {
991 return new_rd_Shl(NULL, irg, block, op, k, mode);
993 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
994 ir_node *op, ir_node *k, ir_mode *mode) {
995 return new_rd_Shr(NULL, irg, block, op, k, mode);
997 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
998 ir_node *op, ir_node *k, ir_mode *mode) {
999 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1001 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1002 ir_node *op, ir_node *k, ir_mode *mode) {
1003 return new_rd_Rot(NULL, irg, block, op, k, mode);
1005 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1006 ir_node *op, ir_mode *mode) {
1007 return new_rd_Conv(NULL, irg, block, op, mode);
1009 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1010 return new_rd_Cast(NULL, irg, block, op, to_tp);
1012 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1013 ir_node **in, ir_mode *mode) {
1014 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1016 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1017 ir_node *store, ir_node *adr, ir_mode *mode) {
1018 return new_rd_Load(NULL, irg, block, store, adr, mode);
1020 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1021 ir_node *store, ir_node *adr, ir_node *val) {
1022 return new_rd_Store(NULL, irg, block, store, adr, val);
1024 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1025 ir_node *size, type *alloc_type, where_alloc where) {
1026 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1028 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1029 ir_node *ptr, ir_node *size, type *free_type) {
1030 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1032 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1033 return new_rd_Sync(NULL, irg, block, arity, in);
1035 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1036 ir_mode *mode, long proj) {
1037 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1039 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1041 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1043 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1044 int arity, ir_node **in) {
1045 return new_rd_Tuple(NULL, irg, block, arity, in );
1047 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1048 ir_node *val, ir_mode *mode) {
1049 return new_rd_Id(NULL, irg, block, val, mode);
1051 ir_node *new_r_Bad (ir_graph *irg) {
1052 return new_rd_Bad(irg);
1054 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1055 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1057 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1058 return new_rd_Unknown(irg, m);
1060 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1061 return new_rd_CallBegin(NULL, irg, block, callee);
1063 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1064 return new_rd_EndReg(NULL, irg, block);
1066 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1067 return new_rd_EndExcept(NULL, irg, block);
1069 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1070 return new_rd_Break(NULL, irg, block);
1072 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1073 ir_mode *mode, long proj) {
1074 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1076 ir_node *new_r_NoMem (ir_graph *irg) {
1077 return new_rd_NoMem(irg);
1079 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1080 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1081 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1085 /** ********************/
1086 /** public interfaces */
1087 /** construction tools */
/* The new_d_* constructors operate on the global current_ir_graph and
   its current_block. */
1091 * - create a new Start node in the current block
1093 * @return s - pointer to the created Start node
1098 new_d_Start (dbg_info* db)
1102 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1103 op_Start, mode_T, 0, NULL);
1104 /* res->attr.start.irg = current_ir_graph; */
1106 res = optimize_node(res);
1107 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates an End node (dynamic in-array) in the current block. */
1112 new_d_End (dbg_info* db)
1115 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1116 op_End, mode_X, -1, NULL);
1117 res = optimize_node(res);
1118 IRN_VRFY_IRG(res, current_ir_graph);
1123 /* Constructs a Block with a fixed number of predecessors.
1124 Does set current_block. Can be used with automatic Phi
1125 node construction. */
1127 new_d_Block (dbg_info* db, int arity, ir_node **in)
1131 bool has_unknown = false;
1133 res = new_rd_Block(db, current_ir_graph, arity, in);
1135 /* Create and initialize array for Phi-node construction. */
1136 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1137 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1138 current_ir_graph->n_loc);
1139 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* An Unknown predecessor suppresses optimization, as in new_rd_Phi
   (loop-body lines are missing from this chunk). */
1142 for (i = arity-1; i >= 0; i--)
1143 if (get_irn_op(in[i]) == op_Unknown) {
1148 if (!has_unknown) res = optimize_node(res);
/* Side effect: the new block becomes the current block. */
1149 current_ir_graph->current_block = res;
1151 IRN_VRFY_IRG(res, current_ir_graph);
1156 /* ***********************************************************************/
1157 /* Methods necessary for automatic Phi node creation */
/* Forward declarations of the mutually recursive Phi-construction
   routines. */
1159 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1160 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1161 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1162 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1164 Call Graph: ( A ---> B == A "calls" B)
1166 get_value mature_immBlock
1174 get_r_value_internal |
1178 new_rd_Phi0 new_rd_Phi_in
1180 * *************************************************************************** */
1182 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi used while a block is still immature; upgraded or
   replaced when the block matures.  Deliberately not optimized. */
1183 static INLINE ir_node *
1184 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1188 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1189 IRN_VRFY_IRG(res, irg);
1193 /* There are two implementations of the Phi node construction. The first
1194 is faster, but does not work for blocks with more than 2 predecessors.
1195 The second works always but is slower and causes more unnecessary Phi
1197 Select the implementations by the following preprocessor flag set in
1199 #if USE_FAST_PHI_CONSTRUCTION
1201 /* This is a stack used for allocating and deallocating nodes in
1202 new_rd_Phi_in. The original implementation used the obstack
1203 to model this stack, now it is explicit. This reduces side effects.
1205 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack. */
1207 new_Phi_in_stack(void) {
1210 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1212 res->stack = NEW_ARR_F (ir_node *, 0);
/* Frees a Phi_in stack's backing array (and presumably the struct
   itself on one of the missing lines). */
1219 free_Phi_in_stack(Phi_in_stack *s) {
1220 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi onto the reuse stack: append if the cursor is
   at the end, otherwise overwrite the slot at the cursor. */
1224 free_to_Phi_in_stack(ir_node *phi) {
1225 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1226 current_ir_graph->Phi_in_stack->pos)
1227 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1229 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1231 (current_ir_graph->Phi_in_stack->pos)++;
/* Pops a recyclable Phi from the stack (re-initializing it in place),
   or allocates a new node if the stack is empty. */
1234 static INLINE ir_node *
1235 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1236 int arity, ir_node **in) {
1238 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1239 int pos = current_ir_graph->Phi_in_stack->pos;
1243 /* We need to allocate a new node */
/* NOTE(review): 'db' is used here but is not among the visible
   parameters — a declaration may be on a missing line; verify. */
1244 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1245 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1247 /* reuse the old node and initialize it again. */
1250 assert (res->kind == k_ir_node);
1251 assert (res->op == op_Phi);
1255 assert (arity >= 0);
1256 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1257 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1259 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1261 (current_ir_graph->Phi_in_stack->pos)--;
1265 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1267 /* Creates a Phi node with a given, fixed array **in of predecessors.
1268 If the Phi node is unnecessary, as the same value reaches the block
1269 through all control flow paths, it is eliminated and the value
1270 returned directly. This constructor is only intended for use in
1271 the automatic Phi node generation triggered by get_value or mature.
1272 The implementation is quite tricky and depends on the fact, that
1273 the nodes are allocated on a stack:
1274 The in array contains predecessors and NULLs. The NULLs appear,
1275 if get_r_value_internal, that computed the predecessors, reached
1276 the same block on two paths. In this case the same value reaches
1277 this block on both paths, there is no definition in between. We need
1278 not allocate a Phi where these path's merge, but we have to communicate
1279 this fact to the caller. This happens by returning a pointer to the
1280 node the caller _will_ allocate. (Yes, we predict the address. We can
1281 do so because the nodes are allocated on the obstack.) The caller then
1282 finds a pointer to itself and, when this routine is called again,
1285 static INLINE ir_node *
1286 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1289 ir_node *res, *known;
1291 /* Allocate a new node on the obstack. This can return a node to
1292 which some of the pointers in the in-array already point.
1293 Attention: the constructor copies the in array, i.e., the later
1294 changes to the array in this routine do not affect the
1295 constructed node! If the in array contains NULLs, there will be
1296 missing predecessors in the returned node. Is this a possible
1297 internal state of the Phi node generation? */
1298 #if USE_EXPLICIT_PHI_IN_STACK
1299 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1301 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1302 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1305 /* The in-array can contain NULLs. These were returned by
1306 get_r_value_internal if it reached the same block/definition on a
1307 second path. The NULLs are replaced by the node itself to
1308 simplify the test in the next loop. */
1309 for (i = 0; i < ins; ++i) {
1314 /* This loop checks whether the Phi has more than one predecessor.
1315 If so, it is a real Phi node and we break the loop. Else the Phi
1316 node merges the same definition on several paths and therefore is
1318 for (i = 0; i < ins; ++i)
1320 if (in[i] == res || in[i] == known) continue;
1328 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The unneeded Phi is recycled: either pushed onto the explicit reuse
   stack or rolled back off the obstack. */
1330 #if USE_EXPLICIT_PHI_IN_STACK
1331 free_to_Phi_in_stack(res);
1333 obstack_free (current_ir_graph->obst, res);
1337 res = optimize_node (res);
1338 IRN_VRFY_IRG(res, irg);
1341 /* return the pointer to the Phi node. This node might be deallocated! */
1346 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1349 allocates and returns this node. The routine called to allocate the
1350 node might optimize it away and return a real value, or even a pointer
1351 to a deallocated Phi node on top of the obstack!
1352 This function is called with an in-array of proper size. **/
1354 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1356 ir_node *prevBlock, *res;
1359 /* This loop goes to all predecessor blocks of the block the Phi node is in
1360 and there finds the operands of the Phi node by calling
1361 get_r_value_internal. */
/* Note: a block's in[0] is itself; predecessors start at index 1,
   hence the 1-based loop filling the 0-based nin array. */
1362 for (i = 1; i <= ins; ++i) {
1363 assert (block->in[i]);
1364 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1366 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1369 /* After collecting all predecessors into the array nin a new Phi node
1370 with these predecessors is created. This constructor contains an
1371 optimization: If all predecessors of the Phi node are identical it
1372 returns the only operand instead of a new Phi node. If the value
1373 passes two different control flow edges without being defined, and
1374 this is the second path treated, a pointer to the node that will be
1375 allocated for the first path (recursion) is returned. We already
1376 know the address of this node, as it is the next node to be allocated
1377 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1378 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1380 /* Now we know the value for "pos" and can enter it in the array with
1381 all known local variables. Attention: this might be a pointer to
1382 a node, that later will be allocated!!! See new_rd_Phi_in.
1383 If this is called in mature, after some set_value in the same block,
1384 the proper value must not be overwritten:
1386 get_value (makes Phi0, put's it into graph_arr)
1387 set_value (overwrites Phi0 in graph_arr)
1388 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the merged value if no later set_value already provided
   one for this position. */
1391 if (!block->attr.block.graph_arr[pos]) {
1392 block->attr.block.graph_arr[pos] = res;
1394 /* printf(" value already computed by %s\n",
1395 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1401 /* This function returns the last definition of a variable. In case
1402 this variable was last defined in a previous block, Phi nodes are
1403 inserted. If the part of the firm graph containing the definition
1404 is not yet constructed, a dummy Phi node is returned. */
1406 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1409 /* There are 4 cases to treat.
1411 1. The block is not mature and we visit it the first time. We can not
1412 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1413 predecessors is returned. This node is added to the linked list (field
1414 "link") of the containing block to be completed when this block is
1415 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1418 2. The value is already known in this block, graph_arr[pos] is set and we
1419 visit the block the first time. We can return the value without
1420 creating any new nodes.
1422 3. The block is mature and we visit it the first time. A Phi node needs
1423 to be created (phi_merge). If the Phi is not needed, as all it's
1424 operands are the same value reaching the block through different
1425 paths, it's optimized away and the value itself is returned.
1427 4. The block is mature, and we visit it the second time. Now two
1428 subcases are possible:
1429 * The value was computed completely the last time we were here. This
1430 is the case if there is no loop. We can return the proper value.
1431 * The recursion that visited this node and set the flag did not
1432 return yet. We are computing a value in a loop and need to
1433 break the recursion without knowing the result yet.
1434 @@@ strange case. Straight forward we would create a Phi before
1435 starting the computation of it's predecessors. In this case we will
1436 find a Phi here in any case. The problem is that this implementation
1437 only creates a Phi after computing the predecessors, so that it is
1438 hard to compute self references of this Phi. @@@
1439 There is no simple check for the second subcase. Therefore we check
1440 for a second visit and treat all such cases as the second subcase.
1441 Anyways, the basic situation is the same: we reached a block
1442 on two paths without finding a definition of the value: No Phi
1443 nodes are needed on both paths.
1444 We return this information "Two paths, no Phi needed" by a very tricky
1445 implementation that relies on the fact that an obstack is a stack and
1446 will return a node with the same address on different allocations.
1447 Look also at phi_merge and new_rd_phi_in to understand this.
1448 @@@ Unfortunately this does not work, see testprogram
1449 three_cfpred_example.
1453 /* case 4 -- already visited. */
1454 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1456 /* visited the first time */
1457 set_irn_visited(block, get_irg_visited(current_ir_graph));
1459 /* Get the local valid value */
1460 res = block->attr.block.graph_arr[pos];
1462 /* case 2 -- If the value is actually computed, return it. */
1463 if (res) return res;
1465 if (block->attr.block.matured) { /* case 3 */
1467 /* The Phi has the same amount of ins as the corresponding block. */
1468 int ins = get_irn_arity(block);
1470 NEW_ARR_A (ir_node *, nin, ins);
1472 /* Phi merge collects the predecessors and then creates a node. */
1473 res = phi_merge (block, pos, mode, nin, ins);
1475 } else { /* case 1 */
1476 /* The block is not mature, we don't know how many in's are needed. A Phi
1477 with zero predecessors is created. Such a Phi node is called Phi0
1478 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1479 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1481 The Phi0 has to remember the pos of it's internal value. If the real
1482 Phi is computed, pos is used to update the array with the local
1485 res = new_rd_Phi0 (current_ir_graph, block, mode);
1486 res->attr.phi0_pos = pos;
1487 res->link = block->link;
1491 /* If we get here, the frontend missed a use-before-definition error */
1494 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
/* NOTE(review): indexing tarval_mode_null[] by mode->code relies on the
   irm_F..irm_P range checked by the assert below -- confirm table bounds. */
1495 assert (mode->code >= irm_F && mode->code <= irm_P);
1496 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1497 tarval_mode_null[mode->code]);
1500 /* The local valid value is available now. */
1501 block->attr.block.graph_arr[pos] = res;
1509 it starts the recursion. This causes an Id at the entry of
1510 every block that has no definition of the value! **/
1512 #if USE_EXPLICIT_PHI_IN_STACK
1514 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1515 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Build a Phi node in `block` with operands `in` (length `ins`).
   `phi0` is the placeholder Phi0 this node will replace, or NULL.
   May fold the Phi to its single distinct operand; the returned node
   need not be a Phi. */
1518 static INLINE ir_node *
1519 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1520 ir_node **in, int ins, ir_node *phi0)
1523 ir_node *res, *known;
1525 /* Allocate a new node on the obstack. The allocation copies the in
1527 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1528 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1530 /* This loop checks whether the Phi has more than one predecessor.
1531 If so, it is a real Phi node and we break the loop. Else the
1532 Phi node merges the same definition on several paths and therefore
1533 is not needed. Don't consider Bad nodes! */
1535 for (i=0; i < ins; ++i)
1539 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1541 /* Optimize self referencing Phis: We can't detect them yet properly, as
1542 they still refer to the Phi0 they will replace. So replace right now. */
1543 if (phi0 && in[i] == phi0) in[i] = res;
1545 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1553 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The freshly allocated Phi is the newest obstack allocation, so freeing it here is legal. */
1556 obstack_free (current_ir_graph->obst, res);
1557 if (is_Phi(known)) {
1558 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1559 order, an enclosing Phi node may become superfluous. */
1560 res = optimize_in_place_2(known);
1561 if (res != known) { exchange(known, res); }
1566 /* An undefined value, e.g., in unreachable code. */
1570 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1571 IRN_VRFY_IRG(res, irg);
1572 /* Memory Phis in endless loops must be kept alive.
1573 As we can't distinguish these easily we keep all of them alive. */
1574 if ((res->op == op_Phi) && (mode == mode_M))
1575 add_End_keepalive(irg->end, res);
1582 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1584 #if PRECISE_EXC_CONTEXT
1586 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1588 /* Construct a new frag_array for node n.
1589 Copy the content from the current graph_arr of the corresponding block:
1590 this is the current state.
1591 Set ProjM(n) as current memory state.
1592 Further the last entry in frag_arr of current block points to n. This
1593 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1595 static INLINE ir_node ** new_frag_arr (ir_node *n)
/* Snapshot the block's variable state (n_loc slots) at the point of the fragile op. */
1600 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1601 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1602 sizeof(ir_node *)*current_ir_graph->n_loc);
1604 /* turn off optimization before allocating Proj nodes, as res isn't
1606 opt = get_opt_optimize(); set_optimize(0);
1607 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Slot 0 holds the memory state; record the exception-path memory Proj of n. */
1608 if (get_irn_op(n) == op_Call)
1609 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* All other fragile ops share the same memory Proj number -- verified below. */
1611 assert((pn_Quot_M == pn_DivMod_M) &&
1612 (pn_Quot_M == pn_Div_M) &&
1613 (pn_Quot_M == pn_Mod_M) &&
1614 (pn_Quot_M == pn_Load_M) &&
1615 (pn_Quot_M == pn_Store_M) &&
1616 (pn_Quot_M == pn_Alloc_M) );
1617 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* The last slot links the current block to its newest fragile op (the frag-op chain). */
1621 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1626 * returns the frag_arr from a node
1628 static INLINE ir_node **
1629 get_frag_arr (ir_node *n) {
/* The frag_arr is stored in an opcode-specific exception attribute,
   so each fragile opcode needs its own accessor path. */
1630 switch (get_irn_opcode(n)) {
1632 return n->attr.call.exc.frag_arr;
1634 return n->attr.a.exc.frag_arr;
1636 return n->attr.load.exc.frag_arr;
1638 return n->attr.store.exc.frag_arr;
1640 return n->attr.except.frag_arr;
/* Record `val` for slot `pos` in a frag_arr without overwriting an existing
   entry, and propagate the value down the chain of fragile ops (linked through
   the last slot, see new_frag_arr). */
1645 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1647 if (!frag_arr[pos]) frag_arr[pos] = val;
1648 if (frag_arr[current_ir_graph->n_loc - 1]) {
1649 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1650 assert(arr != frag_arr && "Endless recursion detected");
1651 set_frag_value(arr, pos, val);
/* Alternative (presumably #if'd) iterative variant: the fixed bound of 1000
   chain steps guards against cycles in the frag-op chain -- TODO confirm
   which variant is compiled. */
1656 for (i = 0; i < 1000; ++i) {
1657 if (!frag_arr[pos]) {
1658 frag_arr[pos] = val;
1660 if (frag_arr[current_ir_graph->n_loc - 1]) {
1661 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1667 assert(0 && "potential endless recursion");
/* Return the value of slot `pos` as seen on the exception path of the
   fragile op `cfOp`, building Phi nodes in `block` if needed. */
1672 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1676 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1678 frag_arr = get_frag_arr(cfOp);
1679 res = frag_arr[pos];
1681 if (block->attr.block.graph_arr[pos]) {
1682 /* There was a set_value after the cfOp and no get_value before that
1683 set_value. We must build a Phi node now. */
1684 if (block->attr.block.matured) {
1685 int ins = get_irn_arity(block);
1687 NEW_ARR_A (ir_node *, nin, ins);
1688 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: queue a Phi0 on the block's link list (see get_r_value_internal). */
1690 res = new_rd_Phi0 (current_ir_graph, block, mode);
1691 res->attr.phi0_pos = pos;
1692 res->link = block->link;
1696 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1697 but this should be better: (remove comment if this works) */
1698 /* It's a Phi, we can write this into all graph_arrs with NULL */
1699 set_frag_value(block->attr.block.graph_arr, pos, res);
1701 res = get_r_value_internal(block, pos, mode);
1702 set_frag_value(block->attr.block.graph_arr, pos, res);
1710 computes the predecessors for the real phi node, and then
1711 allocates and returns this node. The routine called to allocate the
1712 node might optimize it away and return a real value.
1713 This function must be called with an in-array of proper size. **/
/* Fast-construction variant of phi_merge: additionally places a Phi0
   placeholder to break recursion and handles fragile ops / Bad preds. */
1715 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1717 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1720 /* If this block has no value at pos create a Phi0 and remember it
1721 in graph_arr to break recursions.
1722 Else we may not set graph_arr as a later value is remembered there. */
1724 if (!block->attr.block.graph_arr[pos]) {
1725 if (block == get_irg_start_block(current_ir_graph)) {
1726 /* Collapsing to Bad tarvals is no good idea.
1727 So we call a user-supplied routine here that deals with this case as
1728 appropriate for the given language. Unfortunately the only help we can give
1729 here is the position.
1731 Even if all variables are defined before use, it can happen that
1732 we get to the start block, if a cond has been replaced by a tuple
1733 (bad, jmp). In this case we call the function needlessly, eventually
1734 generating a non-existent error.
1735 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1738 if (default_initialize_local_variable)
1739 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1741 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1742 /* We don't need to care about exception ops in the start block.
1743 There are none by definition. */
1744 return block->attr.block.graph_arr[pos];
1746 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1747 block->attr.block.graph_arr[pos] = phi0;
1748 #if PRECISE_EXC_CONTEXT
1749 if (get_opt_precise_exc_context()) {
1750 /* Set graph_arr for fragile ops. Also here we should break recursion.
1751 We could choose a cyclic path through a cfop. But the recursion would
1752 break at some point. */
1753 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1759 /* This loop goes to all predecessor blocks of the block the Phi node
1760 is in and there finds the operands of the Phi node by calling
1761 get_r_value_internal. */
1762 for (i = 1; i <= ins; ++i) {
1763 prevCfOp = skip_Proj(block->in[i]);
1765 if (is_Bad(prevCfOp)) {
1766 /* In case a Cond has been optimized we would get right to the start block
1767 with an invalid definition. */
1768 nin[i-1] = new_Bad();
1771 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1773 if (!is_Bad(prevBlock)) {
1774 #if PRECISE_EXC_CONTEXT
1775 if (get_opt_precise_exc_context() &&
1776 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): get_r_frag_value_internal is called twice with the same
   arguments when asserts are enabled; presumed idempotent -- verify. */
1777 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1778 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1781 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1783 nin[i-1] = new_Bad();
1787 /* We want to pass the Phi0 node to the constructor: this finds additional
1788 optimization possibilities.
1789 The Phi0 node either is allocated in this function, or it comes from
1790 a former call to get_r_value_internal. In this case we may not yet
1791 exchange phi0, as this is done in mature_immBlock. */
/* Only treat graph_arr[pos] as a Phi0 if it really is a zero-arity Phi of this block. */
1793 phi0_all = block->attr.block.graph_arr[pos];
1794 if (!((get_irn_op(phi0_all) == op_Phi) &&
1795 (get_irn_arity(phi0_all) == 0) &&
1796 (get_nodes_block(phi0_all) == block)))
1802 /* After collecting all predecessors into the array nin a new Phi node
1803 with these predecessors is created. This constructor contains an
1804 optimization: If all predecessors of the Phi node are identical it
1805 returns the only operand instead of a new Phi node. */
1806 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1808 /* In case we allocated a Phi0 node at the beginning of this procedure,
1809 we need to exchange this Phi0 with the real Phi. */
1811 exchange(phi0, res);
1812 block->attr.block.graph_arr[pos] = res;
1813 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1814 only an optimization. */
1820 /* This function returns the last definition of a variable. In case
1821 this variable was last defined in a previous block, Phi nodes are
1822 inserted. If the part of the firm graph containing the definition
1823 is not yet constructed, a dummy Phi node is returned. */
1825 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1828 /* There are 4 cases to treat.
1830 1. The block is not mature and we visit it the first time. We can not
1831 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1832 predecessors is returned. This node is added to the linked list (field
1833 "link") of the containing block to be completed when this block is
1834 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1837 2. The value is already known in this block, graph_arr[pos] is set and we
1838 visit the block the first time. We can return the value without
1839 creating any new nodes.
1841 3. The block is mature and we visit it the first time. A Phi node needs
1842 to be created (phi_merge). If the Phi is not needed, as all it's
1843 operands are the same value reaching the block through different
1844 paths, it's optimized away and the value itself is returned.
1846 4. The block is mature, and we visit it the second time. Now two
1847 subcases are possible:
1848 * The value was computed completely the last time we were here. This
1849 is the case if there is no loop. We can return the proper value.
1850 * The recursion that visited this node and set the flag did not
1851 return yet. We are computing a value in a loop and need to
1852 break the recursion. This case only happens if we visited
1853 the same block with phi_merge before, which inserted a Phi0.
1854 So we return the Phi0.
1857 /* case 4 -- already visited. */
1858 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1859 /* As phi_merge allocates a Phi0 this value is always defined. Here
1860 is the critical difference of the two algorithms. */
1861 assert(block->attr.block.graph_arr[pos]);
1862 return block->attr.block.graph_arr[pos];
1865 /* visited the first time */
1866 set_irn_visited(block, get_irg_visited(current_ir_graph));
1868 /* Get the local valid value */
1869 res = block->attr.block.graph_arr[pos];
1871 /* case 2 -- If the value is actually computed, return it. */
1872 if (res) { return res; };
1874 if (block->attr.block.matured) { /* case 3 */
1876 /* The Phi has the same amount of ins as the corresponding block. */
1877 int ins = get_irn_arity(block);
1879 NEW_ARR_A (ir_node *, nin, ins);
1881 /* Phi merge collects the predecessors and then creates a node. */
1882 res = phi_merge (block, pos, mode, nin, ins);
1884 } else { /* case 1 */
1885 /* The block is not mature, we don't know how many in's are needed. A Phi
1886 with zero predecessors is created. Such a Phi node is called Phi0
1887 node. The Phi0 is then added to the list of Phi0 nodes in this block
1888 to be matured by mature_immBlock later.
1889 The Phi0 has to remember the pos of it's internal value. If the real
1890 Phi is computed, pos is used to update the array with the local
1892 res = new_rd_Phi0 (current_ir_graph, block, mode);
1893 res->attr.phi0_pos = pos;
1894 res->link = block->link;
1898 /* If we get here, the frontend missed a use-before-definition error */
1901 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1902 assert (mode->code >= irm_F && mode->code <= irm_P);
1903 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1904 get_mode_null(mode));
1907 /* The local valid value is available now. */
1908 block->attr.block.graph_arr[pos] = res;
1913 #endif /* USE_FAST_PHI_CONSTRUCTION */
1915 /* ************************************************************************** */
1917 /** Finalize a Block node, when all control flows are known. */
1918 /** Acceptable parameters are only Block nodes. */
1920 mature_immBlock (ir_node *block)
1927 assert (get_irn_opcode(block) == iro_Block);
1928 /* @@@ should be commented in
1929 assert (!get_Block_matured(block) && "Block already matured"); */
1931 if (!get_Block_matured(block)) {
/* NOTE(review): arity excludes one slot of block->in -- presumably in[0]
   is the block's own slot, so preds start at index 1; confirm. */
1932 ins = ARR_LEN (block->in)-1;
1933 /* Fix block parameters */
1934 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1936 /* An array for building the Phi nodes. */
1937 NEW_ARR_A (ir_node *, nin, ins);
1939 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each queued Phi0 is upgraded to a full Phi via phi_merge and exchanged in place. */
1941 for (n = block->link; n; n=next) {
1942 inc_irg_visited(current_ir_graph);
1944 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1947 block->attr.block.matured = 1;
1949 /* Now, as the block is a finished firm node, we can optimize it.
1950 Since other nodes have been allocated since the block was created
1951 we can not free the node on the obstack. Therefore we have to call
1953 Unfortunately the optimization does not change a lot, as all allocated
1954 nodes refer to the unoptimized node.
1955 We can call _2, as global cse has no effect on blocks. */
1956 block = optimize_in_place_2(block);
1957 IRN_VRFY_IRG(block, current_ir_graph);
/* Debug-info-carrying convenience constructors: each delegates to the
   corresponding new_rd_* constructor using current_ir_graph and its
   current block. */
1962 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1964 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
/* Const nodes are placed in the start block, not the current block. */
1969 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1971 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1976 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1978 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1984 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1986 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1991 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1993 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* Marks the Cond as fragmentary and creates the default (fall-through) Proj. */
1998 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2001 assert(arg->op == op_Cond);
2002 arg->attr.c.kind = fragmentary;
2003 arg->attr.c.default_proj = max_proj;
2004 res = new_Proj (arg, mode_X, max_proj);
2009 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2011 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2016 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2018 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2022 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2024 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2029 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2031 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2036 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2038 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2044 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2046 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2051 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2053 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2058 * allocate the frag array
/* Attaches a frag_arr snapshot to `res` if precise exception context is on,
   we are still building, the node was not optimized into a different op,
   and (for CSE hits) no frag_arr is attached yet. */
2060 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2061 if (get_opt_precise_exc_context()) {
2062 if ((current_ir_graph->phase_state == phase_building) &&
2063 (get_irn_op(res) == op) && /* Could be optimized away. */
2064 !*frag_store) /* Could be a cse where the arr is already set. */ {
2065 *frag_store = new_frag_arr(res);
/* Fragile arithmetic constructors (may raise division exceptions): each pins
   the node and, with precise exception context, attaches a frag_arr snapshot. */
2072 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2075 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2077 res->attr.except.pin_state = op_pin_state_pinned;
2078 #if PRECISE_EXC_CONTEXT
2079 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2086 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2089 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2091 res->attr.except.pin_state = op_pin_state_pinned;
2092 #if PRECISE_EXC_CONTEXT
2093 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2100 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2103 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2105 res->attr.except.pin_state = op_pin_state_pinned;
2106 #if PRECISE_EXC_CONTEXT
2107 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2114 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2117 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2119 res->attr.except.pin_state = op_pin_state_pinned;
2120 #if PRECISE_EXC_CONTEXT
2121 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Remaining debug-info convenience constructors; all delegate to new_rd_*
   in the current block (SymConst goes to the start block, fragile ops
   additionally allocate a frag_arr). */
2128 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2130 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2135 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2137 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2142 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2144 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2149 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2151 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2156 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2158 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2163 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2165 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2170 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2172 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2177 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2179 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2184 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2186 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2191 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2193 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2198 new_d_Jmp (dbg_info* db)
2200 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2204 new_d_Cond (dbg_info* db, ir_node *c)
2206 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Call is fragile: attach a frag_arr when precise exception context is enabled. */
2210 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2214 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2215 store, callee, arity, in, tp);
2216 #if PRECISE_EXC_CONTEXT
2217 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2224 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2226 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2231 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2233 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2238 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2241 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2243 #if PRECISE_EXC_CONTEXT
2244 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2251 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2254 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2256 #if PRECISE_EXC_CONTEXT
2257 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2264 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2268 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2269 store, size, alloc_type, where);
2270 #if PRECISE_EXC_CONTEXT
2271 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2278 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2280 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2281 store, ptr, size, free_type);
/* simpleSel: a Sel with no index operands (entity selection only). */
2285 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2286 /* GL: objptr was called frame before. Frame was a bad choice for the name
2287 as the operand could as well be a pointer to a dynamic object. */
2289 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2290 store, objptr, 0, NULL, ent);
2294 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2296 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2297 store, objptr, n_index, index, sel);
2301 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2303 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2304 store, objptr, ent));
/* SymConst nodes are placed in the start block, like Const. */
2308 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2310 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2315 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2317 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2322 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2324 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2332 return __new_d_Bad();
2336 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2338 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2343 new_d_Unknown (ir_mode *m)
2345 return new_rd_Unknown(current_ir_graph, m);
2349 new_d_CallBegin (dbg_info *db, ir_node *call)
2352 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2357 new_d_EndReg (dbg_info *db)
2360 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2365 new_d_EndExcept (dbg_info *db)
2368 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2373 new_d_Break (dbg_info *db)
2375 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2379 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2381 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2388 return __new_d_NoMem();
2392 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2393 ir_node *ir_true, ir_mode *mode) {
2394 return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2395 sel, ir_false, ir_true, mode);
2398 /* ********************************************************************* */
2399 /* Comfortable interface with automatic Phi node construction. */
2400 /* (Also uses constructors of the interfaces above, except new_Block.) */
2401 /* ********************************************************************* */
2403 /* * Block construction **/
2404 /* immature Block without predecessors */
/* Creates an immature block (arity -1 => dynamic in-array), makes it the
   current block, and allocates the zeroed graph_arr of n_loc value slots
   used by the automatic Phi construction. */
2405 ir_node *new_d_immBlock (dbg_info* db) {
2408 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2409 /* creates a new dynamic in-array as length of in is -1 */
2410 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2411 current_ir_graph->current_block = res;
2412 res->attr.block.matured = 0;
2413 res->attr.block.dead = 0;
2414 /* res->attr.block.exc = exc_normal; */
2415 /* res->attr.block.handler_entry = 0; */
2416 res->attr.block.irg = current_ir_graph;
2417 res->attr.block.backedge = NULL;
2418 res->attr.block.in_cg = NULL;
2419 res->attr.block.cg_backedge = NULL;
2420 set_Block_block_visited(res, 0);
2422 /* Create and initialize array for Phi-node construction. */
2423 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2424 current_ir_graph->n_loc);
2425 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2427 /* Immature block may not be optimized! */
2428 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper without debug info. */
2434 new_immBlock (void) {
2435 return new_d_immBlock(NULL);
2438 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor `jmp` to the (still immature) block. */
2440 add_immBlock_pred (ir_node *block, ir_node *jmp)
2442 if (block->attr.block.matured) {
2443 assert(0 && "Error: Block already matured!\n");
2446 assert(jmp != NULL);
2447 ARR_APP1(ir_node *, block->in, jmp);
2451 /* changing the current block */
2453 set_cur_block (ir_node *target)
2455 current_ir_graph->current_block = target;
2458 /* ************************ */
2459 /* parameter administration */
2461 /* get a value from the parameter array from the current block by its index */
2463 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2465 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2466 inc_irg_visited(current_ir_graph);
2468 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2470 /* get a value from the parameter array from the current block by its index */
2472 get_value (int pos, ir_mode *mode)
2474 return get_d_value(NULL, pos, mode);
2477 /* set a value at position pos in the parameter array from the current block */
2479 set_value (int pos, ir_node *value)
2481 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2482 assert(pos+1 < current_ir_graph->n_loc);
2483 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2486 /* get the current store */
2490 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2491 /* GL: one could call get_value instead */
2492 inc_irg_visited(current_ir_graph);
2493 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2496 /* set the current store */
2498 set_store (ir_node *store)
2500 /* GL: one could call set_value instead */
2501 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2502 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2506 keep_alive (ir_node *ka)
2508 add_End_keepalive(current_ir_graph->end, ka);
2511 /** Useful access routines **/
2512 /* Returns the current block of the current graph. To set the current
2513 block use set_cur_block. */
2514 ir_node *get_cur_block() {
2515 return get_irg_current_block(current_ir_graph);
2518 /* Returns the frame type of the current graph */
2519 type *get_cur_frame_type() {
2520 return get_irg_frame_type(current_ir_graph);
2524 /* ********************************************************************* */
2527 /* call once for each run of the library */
2529 init_cons (default_initialize_local_variable_func_t *func)
2531 default_initialize_local_variable = func;
2534 /* call for each graph */
2536 finalize_cons (ir_graph *irg) {
2537 irg->phase_state = phase_high;
2541 ir_node *new_Block(int arity, ir_node **in) {
2542 return new_d_Block(NULL, arity, in);
2544 ir_node *new_Start (void) {
2545 return new_d_Start(NULL);
2547 ir_node *new_End (void) {
2548 return new_d_End(NULL);
2550 ir_node *new_Jmp (void) {
2551 return new_d_Jmp(NULL);
2553 ir_node *new_Cond (ir_node *c) {
2554 return new_d_Cond(NULL, c);
2556 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2557 return new_d_Return(NULL, store, arity, in);
2559 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2560 return new_d_Raise(NULL, store, obj);
2562 ir_node *new_Const (ir_mode *mode, tarval *con) {
2563 return new_d_Const(NULL, mode, con);
2566 ir_node *new_Const_type(tarval *con, type *tp) {
2567 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2570 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2571 return new_d_SymConst(NULL, value, kind);
2573 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2574 return new_d_simpleSel(NULL, store, objptr, ent);
2576 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2578 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2580 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2581 return new_d_InstOf (NULL, store, objptr, ent);
2583 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2585 return new_d_Call(NULL, store, callee, arity, in, tp);
2587 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2588 return new_d_Add(NULL, op1, op2, mode);
2590 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2591 return new_d_Sub(NULL, op1, op2, mode);
2593 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2594 return new_d_Minus(NULL, op, mode);
2596 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2597 return new_d_Mul(NULL, op1, op2, mode);
2599 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2600 return new_d_Quot(NULL, memop, op1, op2);
2602 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2603 return new_d_DivMod(NULL, memop, op1, op2);
2605 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2606 return new_d_Div(NULL, memop, op1, op2);
2608 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2609 return new_d_Mod(NULL, memop, op1, op2);
2611 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2612 return new_d_Abs(NULL, op, mode);
2614 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2615 return new_d_And(NULL, op1, op2, mode);
2617 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2618 return new_d_Or(NULL, op1, op2, mode);
2620 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2621 return new_d_Eor(NULL, op1, op2, mode);
2623 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2624 return new_d_Not(NULL, op, mode);
2626 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2627 return new_d_Shl(NULL, op, k, mode);
2629 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2630 return new_d_Shr(NULL, op, k, mode);
2632 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2633 return new_d_Shrs(NULL, op, k, mode);
2635 #define new_Rotate new_Rot
2636 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2637 return new_d_Rot(NULL, op, k, mode);
2639 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2640 return new_d_Cmp(NULL, op1, op2);
2642 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2643 return new_d_Conv(NULL, op, mode);
2645 ir_node *new_Cast (ir_node *op, type *to_tp) {
2646 return new_d_Cast(NULL, op, to_tp);
2648 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2649 return new_d_Phi(NULL, arity, in, mode);
2651 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2652 return new_d_Load(NULL, store, addr, mode);
2654 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2655 return new_d_Store(NULL, store, addr, val);
2657 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2658 where_alloc where) {
2659 return new_d_Alloc(NULL, store, size, alloc_type, where);
2661 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2663 return new_d_Free(NULL, store, ptr, size, free_type);
2665 ir_node *new_Sync (int arity, ir_node **in) {
2666 return new_d_Sync(NULL, arity, in);
2668 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2669 return new_d_Proj(NULL, arg, mode, proj);
2671 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2672 return new_d_defaultProj(NULL, arg, max_proj);
2674 ir_node *new_Tuple (int arity, ir_node **in) {
2675 return new_d_Tuple(NULL, arity, in);
2677 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2678 return new_d_Id(NULL, val, mode);
2680 ir_node *new_Bad (void) {
2683 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2684 return new_d_Confirm (NULL, val, bound, cmp);
2686 ir_node *new_Unknown(ir_mode *m) {
2687 return new_d_Unknown(m);
2689 ir_node *new_CallBegin (ir_node *callee) {
2690 return new_d_CallBegin(NULL, callee);
2692 ir_node *new_EndReg (void) {
2693 return new_d_EndReg(NULL);
2695 ir_node *new_EndExcept (void) {
2696 return new_d_EndExcept(NULL);
2698 ir_node *new_Break (void) {
2699 return new_d_Break(NULL);
2701 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2702 return new_d_Filter(NULL, arg, mode, proj);
2704 ir_node *new_NoMem (void) {
2705 return new_d_NoMem();
2707 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2708 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);