3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
40 # include "iredges_t.h"
42 #if USE_EXPLICIT_PHI_IN_STACK
43 /* A stack needed for the automatic Phi node construction in constructor
44 Phi_in. Redefinition in irgraph.c!! */
49 typedef struct Phi_in_stack Phi_in_stack;
52 /* when we need verifying */
54 # define IRN_VRFY_IRG(res, irg)
56 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
60 * language dependent initialization variable
62 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
64 /* --------------------------------------------- */
65 /* private interfaces, for professional use only */
66 /* --------------------------------------------- */
68 /* Constructs a Block with a fixed number of predecessors.
69 Does not set current_block. Can not be used with automatic
70 Phi node construction. */
/* Constructs a Block node with a fixed number of control-flow predecessors.
   Does not set current_block, so it cannot be used with automatic Phi node
   construction (see the comment above).
   NOTE(review): the embedded original line numbers jump (72 -> 76, 86 -> 88);
   local declarations, the return statement and the closing brace are missing
   from this excerpt. */
72 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Block nodes have no block of their own: the block argument is NULL. */
76 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
77 set_Block_matured(res, 1);
78 set_Block_block_visited(res, 0);
/* Exception-handling fields kept disabled (commented out) in this version. */
80 /* res->attr.block.exc = exc_normal; */
81 /* res->attr.block.handler_entry = 0; */
82 res->attr.block.dead = 0;
83 res->attr.block.irg = irg;
/* Backedge flags are allocated on the graph's obstack, one per predecessor. */
84 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call-graph) predecessor info starts out unset. */
85 res->attr.block.in_cg = NULL;
86 res->attr.block.cg_backedge = NULL;
88 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T, no predecessors) in the given block.
   NOTE(review): excerpt is incomplete; declarations/return are not shown. */
93 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
97 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
98 /* res->attr.start.irg = irg; */
100 IRN_VRFY_IRG(res, irg);
/* Constructs an End node; arity -1 means a dynamic (growable) in-array. */
105 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
109 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
111 IRN_VRFY_IRG(res, irg);
115 /* Creates a Phi node with all predecessors. Calling this constructor
116 is only allowed if the corresponding block is mature. */
118 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
122 bool has_unknown = false;
/* Arity must match the block's predecessor count once the block is mature. */
124 /* Don't assert that block matured: the use of this constructor is strongly
126 if ( get_Block_matured(block) )
127 assert( get_irn_arity(block) == arity );
129 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
131 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; if any are present the node is not
   optimized (presumably optimization would be unsafe then — the flag-setting
   body of this loop is missing from the excerpt). */
133 for (i = arity-1; i >= 0; i--)
134 if (get_irn_op(in[i]) == op_Unknown) {
139 if (!has_unknown) res = optimize_node (res);
140 IRN_VRFY_IRG(res, irg);
142 /* Memory Phis in endless loops must be kept alive.
143 As we can't distinguish these easily we keep all of them alive. */
144 if ((res->op == op_Phi) && (mode == mode_M))
145 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval con with an attached type tp.
   NOTE(review): the block parameter is ignored — the node is always placed
   in irg->start_block; confirm this is intentional (Consts are typically
   placed in the start block). */
150 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
154 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
155 res->attr.con.tv = con;
156 set_Const_type(res, tp); /* Call method because of complex assertion. */
157 res = optimize_node (res);
158 assert(get_Const_type(res) == tp);
159 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const with the unknown type. */
165 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
167 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const from a host long, converted via tarval. */
171 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
173 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Constructs an Id node (forwards val unchanged; normally optimized away). */
177 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
181 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
182 res = optimize_node(res);
183 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting result 'proj' of the tuple node arg. */
188 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
193 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
194 res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
197 assert(get_Proj_pred(res));
198 assert(get_nodes_block(get_Proj_pred(res)));
200 res = optimize_node(res);
202 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond; marks the Cond as fragmentary and
   records max_proj as its default projection number. */
208 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
212 assert(arg->op == op_Cond);
213 arg->attr.c.kind = fragmentary;
214 arg->attr.c.default_proj = max_proj;
215 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting op to the given mode. */
220 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
224 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
225 res = optimize_node(res);
226 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast node: same mode as op, but re-typed to to_tp.
   Only atomic target types are allowed. */
231 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
235 assert(is_atomic_type(to_tp));
237 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
238 res->attr.cast.totype = to_tp;
239 res = optimize_node(res);
240 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple node bundling arity values (mode_T). */
245 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
249 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
250 res = optimize_node (res);
251 IRN_VRFY_IRG(res, irg);
/* ------------------------------------------------------------------ */
/* Arithmetic/logic constructors. All follow the same pattern:
   build the node with new_ir_node, run optimize_node, verify.
   The two-operand variants fill a local in[] array (the fill lines are
   missing from this excerpt — the embedded line numbers jump).
   Quot/DivMod/Div/Mod additionally take a memory operand and produce
   a mode_T tuple (memory + result(s)).                                 */
/* ------------------------------------------------------------------ */
256 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *op1, ir_node *op2, ir_mode *mode)
264 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
265 res = optimize_node(res);
266 IRN_VRFY_IRG(res, irg);
271 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
279 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
280 res = optimize_node (res);
281 IRN_VRFY_IRG(res, irg);
286 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
287 ir_node *op, ir_mode *mode)
291 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
292 res = optimize_node(res);
293 IRN_VRFY_IRG(res, irg);
298 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
299 ir_node *op1, ir_node *op2, ir_mode *mode)
306 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
314 ir_node *memop, ir_node *op1, ir_node *op2)
322 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
323 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
329 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
330 ir_node *memop, ir_node *op1, ir_node *op2)
338 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
339 res = optimize_node(res);
340 IRN_VRFY_IRG(res, irg);
345 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
346 ir_node *memop, ir_node *op1, ir_node *op2)
354 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
355 res = optimize_node(res);
356 IRN_VRFY_IRG(res, irg);
361 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
362 ir_node *memop, ir_node *op1, ir_node *op2)
370 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
371 res = optimize_node(res);
372 IRN_VRFY_IRG(res, irg);
377 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
378 ir_node *op1, ir_node *op2, ir_mode *mode)
385 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
386 res = optimize_node(res);
387 IRN_VRFY_IRG(res, irg);
392 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
393 ir_node *op1, ir_node *op2, ir_mode *mode)
400 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
401 res = optimize_node(res);
402 IRN_VRFY_IRG(res, irg);
407 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
408 ir_node *op1, ir_node *op2, ir_mode *mode)
415 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
416 res = optimize_node (res);
417 IRN_VRFY_IRG(res, irg);
422 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
423 ir_node *op, ir_mode *mode)
427 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
/* Shift/rotate constructors: op shifted/rotated by k. */
434 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
442 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
450 ir_node *op, ir_node *k, ir_mode *mode)
457 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
458 res = optimize_node(res);
459 IRN_VRFY_IRG(res, irg);
464 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *op, ir_node *k, ir_mode *mode)
472 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
480 ir_node *op, ir_node *k, ir_mode *mode)
487 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
495 ir_node *op, ir_mode *mode)
499 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
500 res = optimize_node (res);
501 IRN_VRFY_IRG(res, irg);
/* Cmp yields a mode_T tuple; individual relations are selected via Proj. */
506 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
507 ir_node *op1, ir_node *op2)
514 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
515 res = optimize_node(res);
516 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X, no data predecessors). */
521 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
525 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
526 res = optimize_node (res);
527 IRN_VRFY_IRG (res, irg);
/* Constructs a Cond on selector c; starts in 'dense' kind with default
   projection 0 (new_rd_defaultProj may later switch it to fragmentary). */
532 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
536 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
537 res->attr.c.kind = dense;
538 res->attr.c.default_proj = 0;
539 res = optimize_node (res);
540 IRN_VRFY_IRG(res, irg);
/* Constructs a Call: in-array layout is [store, callee, args...]
   (r_arity = arity + 2; the lines building r_in[0]/r_in[1] are missing
   from this excerpt). tp must be a method type or the unknown type. */
545 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
546 ir_node *callee, int arity, ir_node **in, type *tp)
553 NEW_ARR_A(ir_node *, r_in, r_arity);
556 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
558 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
560 assert((get_unknown_type() == tp) || is_Method_type(tp));
561 set_Call_type(res, tp);
562 res->attr.call.exc.pin_state = op_pin_state_pinned;
563 res->attr.call.callee_arr = NULL;
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
/* Constructs a Return: in-array layout is [store, results...]
   (r_arity = arity + 1; the r_in[0] = store line is missing here). */
570 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
571 ir_node *store, int arity, ir_node **in)
578 NEW_ARR_A (ir_node *, r_in, r_arity);
580 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
581 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
582 res = optimize_node(res);
583 IRN_VRFY_IRG(res, irg);
/* Constructs a Raise with predecessors [store, obj] (fill lines missing). */
588 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
595 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
/* Constructs a Load of the given mode from adr; pinned, non-volatile. */
602 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
603 ir_node *store, ir_node *adr, ir_mode *mode)
610 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
611 res->attr.load.exc.pin_state = op_pin_state_pinned;
612 res->attr.load.load_mode = mode;
613 res->attr.load.volatility = volatility_non_volatile;
614 res = optimize_node(res);
615 IRN_VRFY_IRG(res, irg);
/* Constructs a Store of val to adr; pinned, non-volatile. */
620 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
621 ir_node *store, ir_node *adr, ir_node *val)
629 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
630 res->attr.store.exc.pin_state = op_pin_state_pinned;
631 res->attr.store.volatility = volatility_non_volatile;
632 res = optimize_node(res);
633 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of 'size' bytes/elements of alloc_type on the
   heap or stack as selected by 'where'. */
638 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
639 ir_node *size, type *alloc_type, where_alloc where)
646 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
647 res->attr.a.exc.pin_state = op_pin_state_pinned;
648 res->attr.a.where = where;
649 res->attr.a.type = alloc_type;
650 res = optimize_node(res);
651 IRN_VRFY_IRG(res, irg);
/* Constructs a Free releasing ptr (size/free_type/where recorded). */
656 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
657 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
665 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
666 res->attr.f.where = where;
667 res->attr.f.type = free_type;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel selecting entity ent relative to objptr, with 'arity'
   index operands: in-array layout is [store, objptr, indices...]. */
674 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
675 int arity, ir_node **in, entity *ent)
681 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
684 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
687 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
688 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
689 res->attr.s.ent = ent;
690 res = optimize_node(res);
691 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (runtime type test) of objptr against 'ent'.
   Deliberately not optimized (see commented-out call below).
   NOTE(review): the node is built with op_Sel, not an op_InstOf opcode —
   looks suspicious; confirm whether a dedicated opcode should be used. */
696 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
697 ir_node *objptr, type *ent)
704 NEW_ARR_A(ir_node *, r_in, r_arity);
708 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
709 res->attr.io.ent = ent;
711 /* res = optimize(res); */
712 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst of the given kind carrying 'value'. Address kinds
   (addr_name/addr_ent) get a pointer mode; the mode selection for other
   kinds is missing from this excerpt. */
717 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind, type *tp) {
722 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
727 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
729 res->attr.i.num = symkind;
730 res->attr.i.sym = value;
733 res = optimize_node(res);
734 IRN_VRFY_IRG(res, irg);
/* Wrapper: SymConst with the unknown type. */
739 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
740 symconst_kind symkind)
742 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* The following wrappers place the SymConst in the start block. The casts
   to (type *) initialize the first member of the symconst_symbol union —
   presumably a union over type/entity/ident pointers; confirm in the header. */
746 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
747 symconst_symbol sym = {(type *)symbol};
748 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
751 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
752 symconst_symbol sym = {(type *)symbol};
753 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
756 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
757 symconst_symbol sym = {symbol};
758 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
761 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
762 symconst_symbol sym = {symbol};
763 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Constructs a Sync merging 'arity' memory predecessors (mode_M). */
767 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
771 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
772 res = optimize_node(res);
773 IRN_VRFY_IRG(res, irg);
/* Returns the graph's Bad node (body missing from this excerpt). */
778 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts val 'cmp' bound holds on this path.
   Mode is taken from val; in[] presumably holds {val, bound} (fill lines
   missing from this excerpt). */
784 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
786 ir_node *in[2], *res;
790 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
791 res->attr.confirm_cmp = cmp;
792 res = optimize_node (res);
793 IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown of mode m, always placed in the start block. */
798 new_rd_Unknown (ir_graph *irg, ir_mode *m)
800 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Interprocedural-view constructors. */
/* Constructs a CallBegin for the given Call; predecessor is the callee ptr. */
804 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
809 in[0] = get_Call_ptr(call);
810 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
811 /* res->attr.callbegin.irg = irg; */
812 res->attr.callbegin.call = call;
813 res = optimize_node(res);
814 IRN_VRFY_IRG(res, irg);
/* EndReg/EndExcept: dynamic-arity (-1) end nodes for regular and
   exceptional interprocedural control flow. */
819 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
823 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
825 IRN_VRFY_IRG(res, irg);
830 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
834 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
835 irg->end_except = res;
836 IRN_VRFY_IRG (res, irg);
/* Constructs a Break (mode_X jump out of an interprocedural region). */
841 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
845 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
846 res = optimize_node(res);
847 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter (interprocedural Proj variant) selecting 'proj'. */
852 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
857 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
858 res->attr.filter.proj = proj;
859 res->attr.filter.in_cg = NULL;
860 res->attr.filter.backedge = NULL;
863 assert(get_Proj_pred(res));
864 assert(get_nodes_block(get_Proj_pred(res)));
866 res = optimize_node(res);
867 IRN_VRFY_IRG(res, irg);
/* Returns the graph's NoMem node (body missing from this excerpt). */
872 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux: selects ir_false/ir_true depending on sel
   (the in[] fill lines are missing from this excerpt). */
877 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
878 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
887 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
890 res = optimize_node(res);
891 IRN_VRFY_IRG(res, irg);
/* ------------------------------------------------------------------ */
/* new_r_* wrappers: identical to the corresponding new_rd_* constructor
   but without debug info (db == NULL). Pure one-line forwarders.       */
/* ------------------------------------------------------------------ */
896 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
897 return new_rd_Block(NULL, irg, arity, in);
899 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
900 return new_rd_Start(NULL, irg, block);
902 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
903 return new_rd_End(NULL, irg, block);
905 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
906 return new_rd_Jmp(NULL, irg, block);
908 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
909 return new_rd_Cond(NULL, irg, block, c);
911 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
912 ir_node *store, int arity, ir_node **in) {
913 return new_rd_Return(NULL, irg, block, store, arity, in);
915 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
916 ir_node *store, ir_node *obj) {
917 return new_rd_Raise(NULL, irg, block, store, obj);
919 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
920 ir_mode *mode, tarval *con) {
921 return new_rd_Const(NULL, irg, block, mode, con);
924 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
925 ir_mode *mode, long value) {
926 return new_rd_Const_long(NULL, irg, block, mode, value);
930 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
931 symconst_symbol value, symconst_kind symkind) {
932 return new_rd_SymConst(NULL, irg, block, value, symkind);
934 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
935 ir_node *objptr, int n_index, ir_node **index,
937 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
939 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
941 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
943 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
944 ir_node *callee, int arity, ir_node **in,
946 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
948 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
949 ir_node *op1, ir_node *op2, ir_mode *mode) {
950 return new_rd_Add(NULL, irg, block, op1, op2, mode);
952 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
953 ir_node *op1, ir_node *op2, ir_mode *mode) {
954 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
956 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_mode *mode) {
958 return new_rd_Minus(NULL, irg, block, op, mode);
960 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
961 ir_node *op1, ir_node *op2, ir_mode *mode) {
962 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
964 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
965 ir_node *memop, ir_node *op1, ir_node *op2) {
966 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
968 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
969 ir_node *memop, ir_node *op1, ir_node *op2) {
970 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
972 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
973 ir_node *memop, ir_node *op1, ir_node *op2) {
974 return new_rd_Div(NULL, irg, block, memop, op1, op2);
976 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
977 ir_node *memop, ir_node *op1, ir_node *op2) {
978 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
980 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
981 ir_node *op, ir_mode *mode) {
982 return new_rd_Abs(NULL, irg, block, op, mode);
984 ir_node *new_r_And (ir_graph *irg, ir_node *block,
985 ir_node *op1, ir_node *op2, ir_mode *mode) {
986 return new_rd_And(NULL, irg, block, op1, op2, mode);
988 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
989 ir_node *op1, ir_node *op2, ir_mode *mode) {
990 return new_rd_Or(NULL, irg, block, op1, op2, mode);
992 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
993 ir_node *op1, ir_node *op2, ir_mode *mode) {
994 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
996 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
997 ir_node *op, ir_mode *mode) {
998 return new_rd_Not(NULL, irg, block, op, mode);
1000 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1001 ir_node *op1, ir_node *op2) {
1002 return new_rd_Cmp(NULL, irg, block, op1, op2);
1004 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1005 ir_node *op, ir_node *k, ir_mode *mode) {
1006 return new_rd_Shl(NULL, irg, block, op, k, mode);
1008 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1009 ir_node *op, ir_node *k, ir_mode *mode) {
1010 return new_rd_Shr(NULL, irg, block, op, k, mode);
1012 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1013 ir_node *op, ir_node *k, ir_mode *mode) {
1014 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1016 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1017 ir_node *op, ir_node *k, ir_mode *mode) {
1018 return new_rd_Rot(NULL, irg, block, op, k, mode);
1020 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1021 ir_node *op, ir_mode *mode) {
1022 return new_rd_Conv(NULL, irg, block, op, mode);
1024 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1025 return new_rd_Cast(NULL, irg, block, op, to_tp);
1027 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1028 ir_node **in, ir_mode *mode) {
1029 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1031 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1032 ir_node *store, ir_node *adr, ir_mode *mode) {
1033 return new_rd_Load(NULL, irg, block, store, adr, mode);
1035 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1036 ir_node *store, ir_node *adr, ir_node *val) {
1037 return new_rd_Store(NULL, irg, block, store, adr, val);
1039 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1040 ir_node *size, type *alloc_type, where_alloc where) {
1041 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1043 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1044 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1045 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1047 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1048 return new_rd_Sync(NULL, irg, block, arity, in);
1050 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1051 ir_mode *mode, long proj) {
1052 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1054 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1056 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1058 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1059 int arity, ir_node **in) {
1060 return new_rd_Tuple(NULL, irg, block, arity, in );
1062 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1063 ir_node *val, ir_mode *mode) {
1064 return new_rd_Id(NULL, irg, block, val, mode);
1066 ir_node *new_r_Bad (ir_graph *irg) {
1067 return new_rd_Bad(irg);
1069 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1070 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1072 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1073 return new_rd_Unknown(irg, m);
1075 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1076 return new_rd_CallBegin(NULL, irg, block, callee);
1078 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1079 return new_rd_EndReg(NULL, irg, block);
1081 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1082 return new_rd_EndExcept(NULL, irg, block);
1084 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1085 return new_rd_Break(NULL, irg, block);
1087 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1088 ir_mode *mode, long proj) {
1089 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1091 ir_node *new_r_NoMem (ir_graph *irg) {
1092 return new_rd_NoMem(irg);
1094 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1095 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1096 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1100 /** ********************/
1101 /** public interfaces */
1102 /** construction tools */
1106 * - create a new Start node in the current block
1108 * @return s - pointer to the created Start node
/* new_d_* constructors operate on current_ir_graph/current_block
   (the "debug info, current graph" public interface). */
1113 new_d_Start (dbg_info* db)
1117 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1118 op_Start, mode_T, 0, NULL);
1119 /* res->attr.start.irg = current_ir_graph; */
1121 res = optimize_node(res);
1122 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs the End node of the current graph (dynamic arity -1). */
1127 new_d_End (dbg_info* db)
1130 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1131 op_End, mode_X, -1, NULL);
1132 res = optimize_node(res);
1133 IRN_VRFY_IRG(res, current_ir_graph);
1138 /* Constructs a Block with a fixed number of predecessors.
1139 Does set current_block. Can be used with automatic Phi
1140 node construction. */
1142 new_d_Block (dbg_info* db, int arity, ir_node **in)
1146 bool has_unknown = false;
1148 res = new_rd_Block(db, current_ir_graph, arity, in);
/* During construction, each block carries a graph_arr of n_loc slots
   that maps local-variable positions to their current SSA values. */
1150 /* Create and initialize array for Phi-node construction. */
1151 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1152 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1153 current_ir_graph->n_loc);
1154 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* As in new_rd_Phi: skip optimization when an Unknown predecessor exists
   (the flag-setting loop body is missing from this excerpt). */
1157 for (i = arity-1; i >= 0; i--)
1158 if (get_irn_op(in[i]) == op_Unknown) {
1163 if (!has_unknown) res = optimize_node(res);
/* This constructor DOES update current_block (unlike new_rd_Block). */
1164 current_ir_graph->current_block = res;
1166 IRN_VRFY_IRG(res, current_ir_graph);
1171 /* ***********************************************************************/
1172 /* Methods necessary for automatic Phi node creation */
1174 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1175 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1176 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1177 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1179 Call Graph: ( A ---> B == A "calls" B)
1181 get_value mature_immBlock
1189 get_r_value_internal |
1193 new_rd_Phi0 new_rd_Phi_in
1195 * *************************************************************************** */
1197 /** Creates a Phi node with 0 predecessors */
/* Used as a placeholder during SSA construction; note: NOT optimized. */
1198 static INLINE ir_node *
1199 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1203 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1204 IRN_VRFY_IRG(res, irg);
1208 /* There are two implementations of the Phi node construction. The first
1209 is faster, but does not work for blocks with more than 2 predecessors.
1210 The second works always but is slower and causes more unnecessary Phi
1212 Select the implementations by the following preprocessor flag set in
1214 #if USE_FAST_PHI_CONSTRUCTION
1216 /* This is a stack used for allocating and deallocating nodes in
1217 new_rd_Phi_in. The original implementation used the obstack
1218 to model this stack, now it is explicit. This reduces side effects.
1220 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack (growable array of ir_node*). */
1222 new_Phi_in_stack(void) {
1225 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1227 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's array (and presumably the struct itself —
   the free(s) line is missing from this excerpt). */
1234 free_Phi_in_stack(Phi_in_stack *s) {
1235 DEL_ARR_F(s->stack);
/* Pushes a retired Phi node onto the current graph's reuse stack:
   append if the stack is full up to pos, otherwise overwrite in place. */
1239 free_to_Phi_in_stack(ir_node *phi) {
1240 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1241 current_ir_graph->Phi_in_stack->pos)
1242 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1244 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1246 (current_ir_graph->Phi_in_stack->pos)++;
/* Pops a reusable Phi from the stack (re-initializing its in-array on the
   obstack) or allocates a fresh one when the stack is empty. */
1249 static INLINE ir_node *
1250 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1251 int arity, ir_node **in) {
1253 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1254 int pos = current_ir_graph->Phi_in_stack->pos;
1258 /* We need to allocate a new node */
/* NOTE(review): 'db' is not a parameter of this function in the visible
   signature — looks like a compile error or a missing local; confirm. */
1259 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1260 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1262 /* reuse the old node and initialize it again. */
1265 assert (res->kind == k_ir_node);
1266 assert (res->op == op_Phi);
1270 assert (arity >= 0);
1271 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer; operands start at in[1]. */
1272 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1274 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1276 (current_ir_graph->Phi_in_stack->pos)--;
1280 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1282 /* Creates a Phi node with a given, fixed array **in of predecessors.
1283 If the Phi node is unnecessary, as the same value reaches the block
1284 through all control flow paths, it is eliminated and the value
1285 returned directly. This constructor is only intended for use in
1286 the automatic Phi node generation triggered by get_value or mature.
1287 The implementation is quite tricky and depends on the fact, that
1288 the nodes are allocated on a stack:
1289 The in array contains predecessors and NULLs. The NULLs appear,
1290 if get_r_value_internal, that computed the predecessors, reached
1291 the same block on two paths. In this case the same value reaches
1292 this block on both paths, there is no definition in between. We need
1293 not allocate a Phi where these paths merge, but we have to communicate
1294 this fact to the caller. This happens by returning a pointer to the
1295 node the caller _will_ allocate. (Yes, we predict the address. We can
1296 do so because the nodes are allocated on the obstack.) The caller then
1297 finds a pointer to itself and, when this routine is called again,
/* Constructs a Phi from a fixed in-array during automatic SSA construction.
   May return an existing value instead of a Phi when all predecessors merge
   to the same definition; see the long comment above for the address-
   prediction trick. The returned node may already have been deallocated
   from the obstack (caller must be aware). */
1300 static INLINE ir_node *
1301 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1304 ir_node *res, *known;
1306 /* Allocate a new node on the obstack. This can return a node to
1307 which some of the pointers in the in-array already point.
1308 Attention: the constructor copies the in array, i.e., the later
1309 changes to the array in this routine do not affect the
1310 constructed node! If the in array contains NULLs, there will be
1311 missing predecessors in the returned node. Is this a possible
1312 internal state of the Phi node generation? */
1313 #if USE_EXPLICIT_PHI_IN_STACK
1314 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1316 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1317 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1320 /* The in-array can contain NULLs. These were returned by
1321 get_r_value_internal if it reached the same block/definition on a
1322 second path. The NULLs are replaced by the node itself to
1323 simplify the test in the next loop. */
1324 for (i = 0; i < ins; ++i) {
1329 /* This loop checks whether the Phi has more than one predecessor.
1330 If so, it is a real Phi node and we break the loop. Else the Phi
1331 node merges the same definition on several paths and therefore is
1333 for (i = 0; i < ins; ++i) {
1334 if (in[i] == res || in[i] == known)
1343 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1345 #if USE_EXPLICIT_PHI_IN_STACK
/* Recycle the just-allocated Phi: either back onto the explicit stack,
   or popped straight off the obstack (the top-of-obstack property is what
   makes the address-prediction trick in the header comment work). */
1346 free_to_Phi_in_stack(res);
1348 edges_node_deleted(res, current_ir_graph);
1349 obstack_free(current_ir_graph->obst, res);
1353 res = optimize_node (res);
1354 IRN_VRFY_IRG(res, irg);
1357 /* return the pointer to the Phi node. This node might be deallocated! */
1362 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1365 allocates and returns this node. The routine called to allocate the
1366 node might optimize it away and return a real value, or even a pointer
1367 to a deallocated Phi node on top of the obstack!
1368 This function is called with an in-array of proper size. **/
/* Collects the Phi operands from all predecessor blocks into `nin`, then
   delegates to new_rd_Phi_in. Result is cached in graph_arr[pos] unless a
   value was already recorded there (see comment below). */
1370 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1372 ir_node *prevBlock, *res;
1375 /* This loop goes to all predecessor blocks of the block the Phi node is in
1376 and there finds the operands of the Phi node by calling
1377 get_r_value_internal. */
1378 for (i = 1; i <= ins; ++i) {
1379 assert (block->in[i]);
1380 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1382 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1385 /* After collecting all predecessors into the array nin a new Phi node
1386 with these predecessors is created. This constructor contains an
1387 optimization: If all predecessors of the Phi node are identical it
1388 returns the only operand instead of a new Phi node. If the value
1389 passes two different control flow edges without being defined, and
1390 this is the second path treated, a pointer to the node that will be
1391 allocated for the first path (recursion) is returned. We already
1392 know the address of this node, as it is the next node to be allocated
1393 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1394 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1396 /* Now we know the value for "pos" and can enter it in the array with
1397 all known local variables. Attention: this might be a pointer to
1398 a node, that later will be allocated!!! See new_rd_Phi_in.
1399 If this is called in mature, after some set_value in the same block,
1400 the proper value must not be overwritten:
1402 get_value (makes Phi0, put's it into graph_arr)
1403 set_value (overwrites Phi0 in graph_arr)
1404 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1407 if (!block->attr.block.graph_arr[pos]) {
1408 block->attr.block.graph_arr[pos] = res;
1410 /* printf(" value already computed by %s\n",
1411 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1417 /* This function returns the last definition of a variable. In case
1418 this variable was last defined in a previous block, Phi nodes are
1419 inserted. If the part of the firm graph containing the definition
1420 is not yet constructed, a dummy Phi node is returned. */
1422 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1425 /* There are 4 cases to treat.
1427 1. The block is not mature and we visit it the first time. We can not
1428 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1429 predecessors is returned. This node is added to the linked list (field
1430 "link") of the containing block to be completed when this block is
1431 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1434 2. The value is already known in this block, graph_arr[pos] is set and we
1435 visit the block the first time. We can return the value without
1436 creating any new nodes.
1438 3. The block is mature and we visit it the first time. A Phi node needs
1439 to be created (phi_merge). If the Phi is not needed, as all it's
1440 operands are the same value reaching the block through different
1441 paths, it's optimized away and the value itself is returned.
1443 4. The block is mature, and we visit it the second time. Now two
1444 subcases are possible:
1445 * The value was computed completely the last time we were here. This
1446 is the case if there is no loop. We can return the proper value.
1447 * The recursion that visited this node and set the flag did not
1448 return yet. We are computing a value in a loop and need to
1449 break the recursion without knowing the result yet.
1450 @@@ strange case. Straight forward we would create a Phi before
1451 starting the computation of it's predecessors. In this case we will
1452 find a Phi here in any case. The problem is that this implementation
1453 only creates a Phi after computing the predecessors, so that it is
1454 hard to compute self references of this Phi. @@@
1455 There is no simple check for the second subcase. Therefore we check
1456 for a second visit and treat all such cases as the second subcase.
1457 Anyways, the basic situation is the same: we reached a block
1458 on two paths without finding a definition of the value: No Phi
1459 nodes are needed on both paths.
1460 We return this information "Two paths, no Phi needed" by a very tricky
1461 implementation that relies on the fact that an obstack is a stack and
1462 will return a node with the same address on different allocations.
1463 Look also at phi_merge and new_rd_phi_in to understand this.
1464 @@@ Unfortunately this does not work, see testprogram
1465 three_cfpred_example.
1469 /* case 4 -- already visited. */
1470 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1472 /* visited the first time */
1473 set_irn_visited(block, get_irg_visited(current_ir_graph));
1475 /* Get the local valid value */
1476 res = block->attr.block.graph_arr[pos];
1478 /* case 2 -- If the value is actually computed, return it. */
1479 if (res) return res;
1481 if (block->attr.block.matured) { /* case 3 */
1483 /* The Phi has the same amount of ins as the corresponding block. */
1484 int ins = get_irn_arity(block);
1486 NEW_ARR_A (ir_node *, nin, ins);
1488 /* Phi merge collects the predecessors and then creates a node. */
1489 res = phi_merge (block, pos, mode, nin, ins);
1491 } else { /* case 1 */
1492 /* The block is not mature, we don't know how many in's are needed. A Phi
1493 with zero predecessors is created. Such a Phi node is called Phi0
1494 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1495 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1497 The Phi0 has to remember the pos of it's internal value. If the real
1498 Phi is computed, pos is used to update the array with the local
1501 res = new_rd_Phi0 (current_ir_graph, block, mode);
1502 res->attr.phi0_pos = pos;
1503 res->link = block->link;
1507 /* If we get here, the frontend missed a use-before-definition error */
1510 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1511 assert (mode->code >= irm_F && mode->code <= irm_P);
1512 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1513 tarval_mode_null[mode->code]);
1516 /* The local valid value is available now. */
1517 block->attr.block.graph_arr[pos] = res;
1525 it starts the recursion. This causes an Id at the entry of
1526 every block that has no definition of the value! **/
1528 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy implementations of the Phi-in-stack API for builds that do not use
   the explicit Phi_in stack: allocation yields NULL, freeing is a no-op. */
1530 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1531 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Second variant of the internal Phi constructor: additionally receives the
   Phi0 placeholder (`phi0`) that this Phi will replace, so self-references
   through the Phi0 can be folded immediately. Skips Bad predecessors. */
1534 static INLINE ir_node *
1535 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1536 ir_node **in, int ins, ir_node *phi0)
1539 ir_node *res, *known;
1541 /* Allocate a new node on the obstack. The allocation copies the in
1543 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1544 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1546 /* This loop checks whether the Phi has more than one predecessor.
1547 If so, it is a real Phi node and we break the loop. Else the
1548 Phi node merges the same definition on several paths and therefore
1549 is not needed. Don't consider Bad nodes! */
1551 for (i=0; i < ins; ++i)
1555 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1557 /* Optimize self referencing Phis: We can't detect them yet properly, as
1558 they still refer to the Phi0 they will replace. So replace right now. */
1559 if (phi0 && in[i] == phi0) in[i] = res;
1561 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1569 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1572 edges_node_deleted(res, current_ir_graph);
1573 obstack_free (current_ir_graph->obst, res);
1574 if (is_Phi(known)) {
1575 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1576 order, an enclosing Phi node may become superfluous. */
1577 res = optimize_in_place_2(known);
1579 exchange(known, res);
1585 /* An undefined value, e.g., in unreachable code. */
1589 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1590 IRN_VRFY_IRG(res, irg);
1591 /* Memory Phis in endless loops must be kept alive.
1592 As we can't distinguish these easily we keep all of them alive. */
1593 if ((res->op == op_Phi) && (mode == mode_M))
1594 add_End_keepalive(irg->end, res);
1601 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1603 #if PRECISE_EXC_CONTEXT
1605 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1607 /* Construct a new frag_array for node n.
1608 Copy the content from the current graph_arr of the corresponding block:
1609 this is the current state.
1610 Set ProjM(n) as current memory state.
1611 Further the last entry in frag_arr of current block points to n. This
1612 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1614 static INLINE ir_node ** new_frag_arr (ir_node *n)
1619 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1620 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1621 sizeof(ir_node *)*current_ir_graph->n_loc);
1623 /* turn off optimization before allocating Proj nodes, as res isn't
1625 opt = get_opt_optimize(); set_optimize(0);
1626 /* Here we rely on the fact that all frag ops have Memory as first result! */
1627 if (get_irn_op(n) == op_Call)
1628 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
     /* All other fragile ops share the same memory-Proj number; the asserts
        below document that invariant, so pn_Alloc_M can stand in for all. */
1630 assert((pn_Quot_M == pn_DivMod_M) &&
1631 (pn_Quot_M == pn_Div_M) &&
1632 (pn_Quot_M == pn_Mod_M) &&
1633 (pn_Quot_M == pn_Load_M) &&
1634 (pn_Quot_M == pn_Store_M) &&
1635 (pn_Quot_M == pn_Alloc_M) );
1636 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1640 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1645 * returns the frag_arr from a node
1647 static INLINE ir_node **
1648 get_frag_arr (ir_node *n) {
     /* Dispatch on opcode: the frag_arr lives in a different attribute union
        member per fragile op (Call, Alloc, Load, Store, other except-ops). */
1649 switch (get_irn_opcode(n)) {
1651 return n->attr.call.exc.frag_arr;
1653 return n->attr.a.exc.frag_arr;
1655 return n->attr.load.exc.frag_arr;
1657 return n->attr.store.exc.frag_arr;
1659 return n->attr.except.frag_arr;
/* Records `val` at `pos` in frag_arr and propagates it along the chain of
   fragile-op frag arrays (linked via the last slot, n_loc-1), never
   overwriting an existing entry.
   NOTE(review): two variants are visible in this excerpt -- a recursive one
   and an iteration-bounded one (loop capped at 1000 with a fallback assert);
   presumably one is preprocessor-selected. Confirm against the full source. */
1664 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1666 if (!frag_arr[pos]) frag_arr[pos] = val;
1667 if (frag_arr[current_ir_graph->n_loc - 1]) {
1668 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1669 assert(arr != frag_arr && "Endless recursion detected");
1670 set_frag_value(arr, pos, val);
1675 for (i = 0; i < 1000; ++i) {
1676 if (!frag_arr[pos]) {
1677 frag_arr[pos] = val;
1679 if (frag_arr[current_ir_graph->n_loc - 1]) {
1680 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1686 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but consults the frag_arr of the fragile op
   `cfOp` first, so the value seen on the exceptional path reflects the
   memory state at that op rather than at the end of the block. */
1691 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1695 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1697 frag_arr = get_frag_arr(cfOp);
1698 res = frag_arr[pos];
1700 if (block->attr.block.graph_arr[pos]) {
1701 /* There was a set_value after the cfOp and no get_value before that
1702 set_value. We must build a Phi node now. */
1703 if (block->attr.block.matured) {
1704 int ins = get_irn_arity(block);
1706 NEW_ARR_A (ir_node *, nin, ins);
1707 res = phi_merge(block, pos, mode, nin, ins);
1709 res = new_rd_Phi0 (current_ir_graph, block, mode);
1710 res->attr.phi0_pos = pos;
1711 res->link = block->link;
1715 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1716 but this should be better: (remove comment if this works) */
1717 /* It's a Phi, we can write this into all graph_arrs with NULL */
1718 set_frag_value(block->attr.block.graph_arr, pos, res);
1720 res = get_r_value_internal(block, pos, mode);
1721 set_frag_value(block->attr.block.graph_arr, pos, res);
1729 computes the predecessors for the real phi node, and then
1730 allocates and returns this node. The routine called to allocate the
1731 node might optimize it away and return a real value.
1732 This function must be called with an in-array of proper size. **/
1734 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1736 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1739 /* If this block has no value at pos create a Phi0 and remember it
1740 in graph_arr to break recursions.
1741 Else we may not set graph_arr as there a later value is remembered. */
1743 if (!block->attr.block.graph_arr[pos]) {
1744 if (block == get_irg_start_block(current_ir_graph)) {
1745 /* Collapsing to Bad tarvals is no good idea.
1746 So we call a user-supplied routine here that deals with this case as
1747 appropriate for the given language. Sadly the only help we can give
1748 here is the position.
1750 Even if all variables are defined before use, it can happen that
1751 we get to the start block, if a cond has been replaced by a tuple
1752 (bad, jmp). In this case we call the function needlessly, eventually
1753 generating a non-existent error.
1754 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1757 if (default_initialize_local_variable)
1758 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1760 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1761 /* We don't need to care about exception ops in the start block.
1762 There are none by definition. */
1763 return block->attr.block.graph_arr[pos];
1765 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1766 block->attr.block.graph_arr[pos] = phi0;
1767 #if PRECISE_EXC_CONTEXT
1768 if (get_opt_precise_exc_context()) {
1769 /* Set graph_arr for fragile ops. Also here we should break recursion.
1770 We could choose a cyclic path through a cfop. But the recursion would
1771 break at some point. */
1772 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1778 /* This loop goes to all predecessor blocks of the block the Phi node
1779 is in and there finds the operands of the Phi node by calling
1780 get_r_value_internal. */
1781 for (i = 1; i <= ins; ++i) {
1782 prevCfOp = skip_Proj(block->in[i]);
1784 if (is_Bad(prevCfOp)) {
1785 /* In case a Cond has been optimized we would get right to the start block
1786 with an invalid definition. */
1787 nin[i-1] = new_Bad();
1790 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1792 if (!is_Bad(prevBlock)) {
1793 #if PRECISE_EXC_CONTEXT
1794 if (get_opt_precise_exc_context() &&
1795 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1796 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1797 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1800 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1802 nin[i-1] = new_Bad();
1806 /* We want to pass the Phi0 node to the constructor: this finds additional
1807 optimization possibilities.
1808 The Phi0 node either is allocated in this function, or it comes from
1809 a former call to get_r_value_internal. In this case we may not yet
1810 exchange phi0, as this is done in mature_immBlock. */
1812 phi0_all = block->attr.block.graph_arr[pos];
1813 if (!((get_irn_op(phi0_all) == op_Phi) &&
1814 (get_irn_arity(phi0_all) == 0) &&
1815 (get_nodes_block(phi0_all) == block)))
1821 /* After collecting all predecessors into the array nin a new Phi node
1822 with these predecessors is created. This constructor contains an
1823 optimization: If all predecessors of the Phi node are identical it
1824 returns the only operand instead of a new Phi node. */
1825 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1827 /* In case we allocated a Phi0 node at the beginning of this procedure,
1828 we need to exchange this Phi0 with the real Phi. */
1830 exchange(phi0, res);
1831 block->attr.block.graph_arr[pos] = res;
1832 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1833 only an optimization. */
1839 /* This function returns the last definition of a variable. In case
1840 this variable was last defined in a previous block, Phi nodes are
1841 inserted. If the part of the firm graph containing the definition
1842 is not yet constructed, a dummy Phi node is returned. */
1844 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1847 /* There are 4 cases to treat.
1849 1. The block is not mature and we visit it the first time. We can not
1850 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1851 predecessors is returned. This node is added to the linked list (field
1852 "link") of the containing block to be completed when this block is
1853 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1856 2. The value is already known in this block, graph_arr[pos] is set and we
1857 visit the block the first time. We can return the value without
1858 creating any new nodes.
1860 3. The block is mature and we visit it the first time. A Phi node needs
1861 to be created (phi_merge). If the Phi is not needed, as all it's
1862 operands are the same value reaching the block through different
1863 paths, it's optimized away and the value itself is returned.
1865 4. The block is mature, and we visit it the second time. Now two
1866 subcases are possible:
1867 * The value was computed completely the last time we were here. This
1868 is the case if there is no loop. We can return the proper value.
1869 * The recursion that visited this node and set the flag did not
1870 return yet. We are computing a value in a loop and need to
1871 break the recursion. This case only happens if we visited
1872 the same block with phi_merge before, which inserted a Phi0.
1873 So we return the Phi0.
1876 /* case 4 -- already visited. */
1877 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1878 /* As phi_merge allocates a Phi0 this value is always defined. Here
1879 is the critical difference of the two algorithms. */
1880 assert(block->attr.block.graph_arr[pos]);
1881 return block->attr.block.graph_arr[pos];
1884 /* visited the first time */
1885 set_irn_visited(block, get_irg_visited(current_ir_graph));
1887 /* Get the local valid value */
1888 res = block->attr.block.graph_arr[pos];
1890 /* case 2 -- If the value is actually computed, return it. */
1891 if (res) { return res; };
1893 if (block->attr.block.matured) { /* case 3 */
1895 /* The Phi has the same amount of ins as the corresponding block. */
1896 int ins = get_irn_arity(block);
1898 NEW_ARR_A (ir_node *, nin, ins);
1900 /* Phi merge collects the predecessors and then creates a node. */
1901 res = phi_merge (block, pos, mode, nin, ins);
1903 } else { /* case 1 */
1904 /* The block is not mature, we don't know how many in's are needed. A Phi
1905 with zero predecessors is created. Such a Phi node is called Phi0
1906 node. The Phi0 is then added to the list of Phi0 nodes in this block
1907 to be matured by mature_immBlock later.
1908 The Phi0 has to remember the pos of it's internal value. If the real
1909 Phi is computed, pos is used to update the array with the local
1911 res = new_rd_Phi0 (current_ir_graph, block, mode);
1912 res->attr.phi0_pos = pos;
1913 res->link = block->link;
1917 /* If we get here, the frontend missed a use-before-definition error */
1920 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1921 assert (mode->code >= irm_F && mode->code <= irm_P);
1922 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1923 get_mode_null(mode));
1926 /* The local valid value is available now. */
1927 block->attr.block.graph_arr[pos] = res;
1932 #endif /* USE_FAST_PHI_CONSTRUCTION */
1934 /* ************************************************************************** */
1936 /** Finalize a Block node, when all control flows are known. */
1937 /** Acceptable parameters are only Block nodes. */
1939 mature_immBlock (ir_node *block)
1946 assert (get_irn_opcode(block) == iro_Block);
1947 /* @@@ should be commented in
1948 assert (!get_Block_matured(block) && "Block already matured"); */
1950 if (!get_Block_matured(block)) {
1951 ins = ARR_LEN (block->in)-1;
1952 /* Fix block parameters */
1953 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1955 /* An array for building the Phi nodes. */
1956 NEW_ARR_A (ir_node *, nin, ins);
1958 /* Traverse a chain of Phi nodes attached to this block and mature
     /* Each Phi0 on the block's link list is upgraded to a real Phi via
        phi_merge and then exchanged in place. */
1960 for (n = block->link; n; n=next) {
1961 inc_irg_visited(current_ir_graph);
1963 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1966 block->attr.block.matured = 1;
1968 /* Now, as the block is a finished firm node, we can optimize it.
1969 Since other nodes have been allocated since the block was created
1970 we can not free the node on the obstack. Therefore we have to call
1972 Unfortunately the optimization does not change a lot, as all allocated
1973 nodes refer to the unoptimized node.
1974 We can call _2, as global cse has no effect on blocks. */
1975 block = optimize_in_place_2(block);
1976 IRN_VRFY_IRG(block, current_ir_graph);
/* Debug-info-carrying constructors: each new_d_X delegates to new_rd_X in
   the current graph's current block (constants/SymConsts use start_block). */
1981 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1983 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1988 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1990 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1995 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
1997 return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
2001 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2003 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2009 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2011 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2016 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2018 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
2023 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
     /* Marks the Cond as fragmentary and records max_proj as its default
        projection before building the Proj. */
2026 assert(arg->op == op_Cond);
2027 arg->attr.c.kind = fragmentary;
2028 arg->attr.c.default_proj = max_proj;
2029 res = new_Proj (arg, mode_X, max_proj);
2034 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2036 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2041 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2043 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2047 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2049 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2054 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2056 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2061 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2063 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2069 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2071 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2076 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2078 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2083 * allocate the frag array
2085 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2086 if (get_opt_precise_exc_context()) {
     /* Only while building, only if res still has the expected op (it may
        have been optimized away), and only if no frag_arr exists yet (cse). */
2087 if ((current_ir_graph->phase_state == phase_building) &&
2088 (get_irn_op(res) == op) && /* Could be optimized away. */
2089 !*frag_store) /* Could be a cse where the arr is already set. */ {
2090 *frag_store = new_frag_arr(res);
/* Division-family constructors: pinned fragile ops; under PRECISE_EXC_CONTEXT
   each one allocates a frag_arr for precise exception-state tracking. */
2097 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2100 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2102 res->attr.except.pin_state = op_pin_state_pinned;
2103 #if PRECISE_EXC_CONTEXT
2104 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2111 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2114 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2116 res->attr.except.pin_state = op_pin_state_pinned;
2117 #if PRECISE_EXC_CONTEXT
2118 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2125 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2128 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2130 res->attr.except.pin_state = op_pin_state_pinned;
2131 #if PRECISE_EXC_CONTEXT
2132 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2139 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2142 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2144 res->attr.except.pin_state = op_pin_state_pinned;
2145 #if PRECISE_EXC_CONTEXT
2146 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Bitwise, shift, comparison and control-flow constructors: thin delegating
   wrappers around the corresponding new_rd_* in the current block. */
2153 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2155 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2160 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2162 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2167 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2169 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2174 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2176 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2181 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2183 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2188 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2190 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2195 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2197 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2202 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2204 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2209 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2211 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2216 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2218 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2223 new_d_Jmp (dbg_info* db)
2225 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2229 new_d_Cond (dbg_info* db, ir_node *c)
2231 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory/fragile-op constructors (Call, Return, Raise, Load, Store, Alloc).
   Fragile ops allocate a frag_arr when PRECISE_EXC_CONTEXT is enabled. */
2235 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2239 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2240 store, callee, arity, in, tp);
2241 #if PRECISE_EXC_CONTEXT
2242 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2249 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2251 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2256 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2258 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2263 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2266 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2268 #if PRECISE_EXC_CONTEXT
2269 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2276 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2279 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2281 #if PRECISE_EXC_CONTEXT
2282 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2289 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2293 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2294 store, size, alloc_type, where);
2295 #if PRECISE_EXC_CONTEXT
2296 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
/* Remaining debug-info constructors (Free, Sel, InstOf, SymConst, Sync,
   Confirm, interprocedural-view nodes, Filter, Mux): delegating wrappers. */
2303 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
2304 ir_node *size, type *free_type, where_alloc where)
2306 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2307 store, ptr, size, free_type, where);
2311 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2312 /* GL: objptr was called frame before. Frame was a bad choice for the name
2313 as the operand could as well be a pointer to a dynamic object. */
2315 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2316 store, objptr, 0, NULL, ent);
2320 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2322 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2323 store, objptr, n_index, index, sel);
2327 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2329 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2330 store, objptr, ent));
2334 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2336 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2341 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2343 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2348 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2350 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2358 return _new_d_Bad();
2362 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2364 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2369 new_d_Unknown (ir_mode *m)
2371 return new_rd_Unknown(current_ir_graph, m);
2375 new_d_CallBegin (dbg_info *db, ir_node *call)
2378 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2383 new_d_EndReg (dbg_info *db)
2386 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2391 new_d_EndExcept (dbg_info *db)
2394 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2399 new_d_Break (dbg_info *db)
2401 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2405 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2407 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2414 return _new_d_NoMem();
2418 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2419 ir_node *ir_true, ir_mode *mode) {
2420 return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2421 sel, ir_false, ir_true, mode);
2424 /* ********************************************************************* */
2425 /* Comfortable interface with automatic Phi node construction. */
2426 /* (Uses also constructors of ?? interface, except new_Block. */
2427 /* ********************************************************************* */
2429 /* * Block construction **/
2430 /* immature Block without predecessors */
2431 ir_node *new_d_immBlock (dbg_info* db) {
2434 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2435 /* creates a new dynamic in-array as length of in is -1 */
2436 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2437 current_ir_graph->current_block = res;
2438 res->attr.block.matured = 0;
2439 res->attr.block.dead = 0;
2440 /* res->attr.block.exc = exc_normal; */
2441 /* res->attr.block.handler_entry = 0; */
2442 res->attr.block.irg = current_ir_graph;
2443 res->attr.block.backedge = NULL;
2444 res->attr.block.in_cg = NULL;
2445 res->attr.block.cg_backedge = NULL;
2446 set_Block_block_visited(res, 0);
2448 /* Create and initialize array for Phi-node construction. */
2449 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2450 current_ir_graph->n_loc);
2451 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2453 /* Immature block may not be optimized! */
2454 IRN_VRFY_IRG(res, current_ir_graph);
     /* Convenience variant without debug info. */
2460 new_immBlock (void) {
2461 return new_d_immBlock(NULL);
2464 /* add an edge to a jmp/control flow node */
2466 add_immBlock_pred (ir_node *block, ir_node *jmp)
2468 if (block->attr.block.matured) {
2469 assert(0 && "Error: Block already matured!\n");
2472 assert(jmp != NULL);
2473 ARR_APP1(ir_node *, block->in, jmp);
2477 /* changing the current block */
2479 set_cur_block (ir_node *target)
2481 current_ir_graph->current_block = target;
2484 /* ************************ */
2485 /* parameter administration */
2487 /* get a value from the parameter array from the current block by its index */
2489 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2491 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2492 inc_irg_visited(current_ir_graph);
2494 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2496 /* get a value from the parameter array from the current block by its index */
2498 get_value (int pos, ir_mode *mode)
2500 return get_d_value(NULL, pos, mode);
2503 /* set a value at position pos in the parameter array from the current block */
2505 set_value (int pos, ir_node *value)
2507 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2508 assert(pos+1 < current_ir_graph->n_loc);
2509 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2512 /* get the current store */
2516 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2517 /* GL: one could call get_value instead */
2518 inc_irg_visited(current_ir_graph);
2519 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2522 /* set the current store */
2524 set_store (ir_node *store)
2526 /* GL: one could call set_value instead */
2527 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2528 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2532 keep_alive (ir_node *ka)
2534 add_End_keepalive(current_ir_graph->end, ka);
2537 /** Useful access routines **/
2538 /* Returns the current block of the current graph. To set the current
2539 block use set_cur_block. */
2540 ir_node *get_cur_block() {
2541 return get_irg_current_block(current_ir_graph);
2544 /* Returns the frame type of the current graph */
2545 type *get_cur_frame_type() {
2546 return get_irg_frame_type(current_ir_graph);
2550 /* ********************************************************************* */
2553 /* call once for each run of the library */
2555 init_cons(uninitialized_local_variable_func_t *func)
2557 default_initialize_local_variable = func;
2560 /* call for each graph */
2562 finalize_cons (ir_graph *irg) {
2563 irg->phase_state = phase_high;
2567 ir_node *new_Block(int arity, ir_node **in) {
2568 return new_d_Block(NULL, arity, in);
2570 ir_node *new_Start (void) {
2571 return new_d_Start(NULL);
2573 ir_node *new_End (void) {
2574 return new_d_End(NULL);
2576 ir_node *new_Jmp (void) {
2577 return new_d_Jmp(NULL);
2579 ir_node *new_Cond (ir_node *c) {
2580 return new_d_Cond(NULL, c);
2582 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2583 return new_d_Return(NULL, store, arity, in);
2585 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2586 return new_d_Raise(NULL, store, obj);
2588 ir_node *new_Const (ir_mode *mode, tarval *con) {
2589 return new_d_Const(NULL, mode, con);
2592 ir_node *new_Const_long(ir_mode *mode, long value)
2594 return new_d_Const_long(NULL, mode, value);
2597 ir_node *new_Const_type(tarval *con, type *tp) {
2598 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2601 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2602 return new_d_SymConst(NULL, value, kind);
2604 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2605 return new_d_simpleSel(NULL, store, objptr, ent);
2607 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2609 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2611 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2612 return new_d_InstOf (NULL, store, objptr, ent);
2614 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2616 return new_d_Call(NULL, store, callee, arity, in, tp);
2618 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2619 return new_d_Add(NULL, op1, op2, mode);
2621 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2622 return new_d_Sub(NULL, op1, op2, mode);
2624 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2625 return new_d_Minus(NULL, op, mode);
2627 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2628 return new_d_Mul(NULL, op1, op2, mode);
2630 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2631 return new_d_Quot(NULL, memop, op1, op2);
2633 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2634 return new_d_DivMod(NULL, memop, op1, op2);
2636 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2637 return new_d_Div(NULL, memop, op1, op2);
2639 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2640 return new_d_Mod(NULL, memop, op1, op2);
2642 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2643 return new_d_Abs(NULL, op, mode);
2645 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2646 return new_d_And(NULL, op1, op2, mode);
2648 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2649 return new_d_Or(NULL, op1, op2, mode);
2651 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2652 return new_d_Eor(NULL, op1, op2, mode);
2654 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2655 return new_d_Not(NULL, op, mode);
2657 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2658 return new_d_Shl(NULL, op, k, mode);
2660 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2661 return new_d_Shr(NULL, op, k, mode);
2663 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2664 return new_d_Shrs(NULL, op, k, mode);
2666 #define new_Rotate new_Rot
2667 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2668 return new_d_Rot(NULL, op, k, mode);
2670 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2671 return new_d_Cmp(NULL, op1, op2);
2673 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2674 return new_d_Conv(NULL, op, mode);
2676 ir_node *new_Cast (ir_node *op, type *to_tp) {
2677 return new_d_Cast(NULL, op, to_tp);
2679 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2680 return new_d_Phi(NULL, arity, in, mode);
2682 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2683 return new_d_Load(NULL, store, addr, mode);
2685 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2686 return new_d_Store(NULL, store, addr, val);
2688 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2689 where_alloc where) {
2690 return new_d_Alloc(NULL, store, size, alloc_type, where);
2692 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2693 type *free_type, where_alloc where) {
2694 return new_d_Free(NULL, store, ptr, size, free_type, where);
2696 ir_node *new_Sync (int arity, ir_node **in) {
2697 return new_d_Sync(NULL, arity, in);
2699 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2700 return new_d_Proj(NULL, arg, mode, proj);
2702 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2703 return new_d_defaultProj(NULL, arg, max_proj);
2705 ir_node *new_Tuple (int arity, ir_node **in) {
2706 return new_d_Tuple(NULL, arity, in);
2708 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2709 return new_d_Id(NULL, val, mode);
2711 ir_node *new_Bad (void) {
2714 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2715 return new_d_Confirm (NULL, val, bound, cmp);
2717 ir_node *new_Unknown(ir_mode *m) {
2718 return new_d_Unknown(m);
2720 ir_node *new_CallBegin (ir_node *callee) {
2721 return new_d_CallBegin(NULL, callee);
2723 ir_node *new_EndReg (void) {
2724 return new_d_EndReg(NULL);
2726 ir_node *new_EndExcept (void) {
2727 return new_d_EndExcept(NULL);
2729 ir_node *new_Break (void) {
2730 return new_d_Break(NULL);
2732 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2733 return new_d_Filter(NULL, arg, mode, proj);
2735 ir_node *new_NoMem (void) {
2736 return new_d_NoMem();
2738 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2739 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);