3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
40 # include "iredges_t.h"
42 #if USE_EXPLICIT_PHI_IN_STACK
43 /* A stack needed for the automatic Phi node construction in constructor
44 Phi_in. Redefinition in irgraph.c!! */
49 typedef struct Phi_in_stack Phi_in_stack;
52 /* when we need verification */
54 # define IRN_VRFY_IRG(res, irg)
56 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
60 * language dependent initialization variable
62 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
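/* Sketch of such a callback (illustration only; the parameter list mirrors the
   call in phi_merge() below, and registration through init_cons() declared in
   ircons.h is an assumption).  A frontend could make every uninitialized local
   variable default to zero:

     static ir_node *my_init_local(ir_graph *irg, ir_mode *mode, int pos) {
       (void)irg; (void)pos;
       return new_Const(mode, get_mode_null(mode));
     }

     init_cons(my_init_local);
*/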
64 /* -------------------------------------------- */
65 /* private interfaces, for professional use only */
66 /* -------------------------------------------- */
68 /* Constructs a Block with a fixed number of predecessors.
69    Does not set current_block.  Cannot be used with automatic
70 Phi node construction. */
72 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
76 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
77 set_Block_matured(res, 1);
78 set_Block_block_visited(res, 0);
80 /* res->attr.block.exc = exc_normal; */
81 /* res->attr.block.handler_entry = 0; */
82 res->attr.block.dead = 0;
83 res->attr.block.irg = irg;
84 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
85 res->attr.block.in_cg = NULL;
86 res->attr.block.cg_backedge = NULL;
88 IRN_VRFY_IRG(res, irg);
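/* Usage sketch (illustration only; block1, block2 and irg are assumed to be
   already constructed): with this raw constructor the frontend supplies all
   control flow predecessors itself, e.g. to join two finished blocks:

     ir_node *in[2], *join;
     in[0] = new_rd_Jmp(NULL, irg, block1);
     in[1] = new_rd_Jmp(NULL, irg, block2);
     join  = new_rd_Block(NULL, irg, 2, in);

   The block is matured immediately; any Phi nodes for join must be built
   explicitly with new_rd_Phi() afterwards. */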
93 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
97 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
98 /* res->attr.start.irg = irg; */
100 IRN_VRFY_IRG(res, irg);
105 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
109 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
111 IRN_VRFY_IRG(res, irg);
115 /* Creates a Phi node with all predecessors. Calling this constructor
116 is only allowed if the corresponding block is mature. */
118 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
122 bool has_unknown = false;
124 /* Don't assert that block matured: the use of this constructor is strongly
126 if ( get_Block_matured(block) )
127 assert( get_irn_arity(block) == arity );
129 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
131 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
133 for (i = arity-1; i >= 0; i--)
134 if (get_irn_op(in[i]) == op_Unknown) {
139 if (!has_unknown) res = optimize_node (res);
140 IRN_VRFY_IRG(res, irg);
142 /* Memory Phis in endless loops must be kept alive.
143 As we can't distinguish these easily we keep all of them alive. */
144 if ((res->op == op_Phi) && (mode == mode_M))
145 add_End_keepalive(irg->end, res);
150 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
154 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
155 res->attr.con.tv = con;
156 set_Const_type(res, tp); /* Call method because of complex assertion. */
157 res = optimize_node (res);
158 assert(get_Const_type(res) == tp);
159 IRN_VRFY_IRG(res, irg);
165 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
167 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
171 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
173 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
177 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
181 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
182 res = optimize_node(res);
183 IRN_VRFY_IRG(res, irg);
188 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
193 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
194 res->attr.proj = proj;
197 assert(get_Proj_pred(res));
198 assert(get_nodes_block(get_Proj_pred(res)));
200 res = optimize_node(res);
202 IRN_VRFY_IRG(res, irg);
208 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
212 assert(arg->op == op_Cond);
213 arg->attr.c.kind = fragmentary;
214 arg->attr.c.default_proj = max_proj;
215 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
220 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
224 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
225 res = optimize_node(res);
226 IRN_VRFY_IRG(res, irg);
231 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
235 assert(is_atomic_type(to_tp));
237 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
238 res->attr.cast.totype = to_tp;
239 res = optimize_node(res);
240 IRN_VRFY_IRG(res, irg);
245 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
249 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
250 res = optimize_node (res);
251 IRN_VRFY_IRG(res, irg);
256 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *op1, ir_node *op2, ir_mode *mode)
264 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
265 res = optimize_node(res);
266 IRN_VRFY_IRG(res, irg);
271 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
279 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
280 res = optimize_node (res);
281 IRN_VRFY_IRG(res, irg);
286 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
287 ir_node *op, ir_mode *mode)
291 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
292 res = optimize_node(res);
293 IRN_VRFY_IRG(res, irg);
298 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
299 ir_node *op1, ir_node *op2, ir_mode *mode)
306 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
314 ir_node *memop, ir_node *op1, ir_node *op2)
322 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
323 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
329 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
330 ir_node *memop, ir_node *op1, ir_node *op2)
338 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
339 res = optimize_node(res);
340 IRN_VRFY_IRG(res, irg);
345 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
346 ir_node *memop, ir_node *op1, ir_node *op2)
354 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
355 res = optimize_node(res);
356 IRN_VRFY_IRG(res, irg);
361 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
362 ir_node *memop, ir_node *op1, ir_node *op2)
370 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
371 res = optimize_node(res);
372 IRN_VRFY_IRG(res, irg);
377 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
378 ir_node *op1, ir_node *op2, ir_mode *mode)
385 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
386 res = optimize_node(res);
387 IRN_VRFY_IRG(res, irg);
392 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
393 ir_node *op1, ir_node *op2, ir_mode *mode)
400 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
401 res = optimize_node(res);
402 IRN_VRFY_IRG(res, irg);
407 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
408 ir_node *op1, ir_node *op2, ir_mode *mode)
415 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
416 res = optimize_node (res);
417 IRN_VRFY_IRG(res, irg);
422 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
423 ir_node *op, ir_mode *mode)
427 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
434 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
442 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
450 ir_node *op, ir_node *k, ir_mode *mode)
457 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
458 res = optimize_node(res);
459 IRN_VRFY_IRG(res, irg);
464 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *op, ir_node *k, ir_mode *mode)
472 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
480 ir_node *op, ir_node *k, ir_mode *mode)
487 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
495 ir_node *op, ir_mode *mode)
499 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
500 res = optimize_node (res);
501 IRN_VRFY_IRG(res, irg);
506 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
507 ir_node *op1, ir_node *op2)
514 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
515 res = optimize_node(res);
516 IRN_VRFY_IRG(res, irg);
521 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
525 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
526 res = optimize_node (res);
527 IRN_VRFY_IRG (res, irg);
532 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
536 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
537 res->attr.c.kind = dense;
538 res->attr.c.default_proj = 0;
539 res = optimize_node (res);
540 IRN_VRFY_IRG(res, irg);
545 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
546 ir_node *callee, int arity, ir_node **in, type *tp)
553 NEW_ARR_A(ir_node *, r_in, r_arity);
556 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
558 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
560 assert((get_unknown_type() == tp) || is_Method_type(tp));
561 set_Call_type(res, tp);
562 res->attr.call.exc.pin_state = op_pin_state_pinned;
563 res->attr.call.callee_arr = NULL;
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
570 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
571 ir_node *store, int arity, ir_node **in)
578 NEW_ARR_A (ir_node *, r_in, r_arity);
580 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
581 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
582 res = optimize_node(res);
583 IRN_VRFY_IRG(res, irg);
588 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
595 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
602 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
603 ir_node *store, ir_node *adr, ir_mode *mode)
610 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
611 res->attr.load.exc.pin_state = op_pin_state_pinned;
612 res->attr.load.load_mode = mode;
613 res->attr.load.volatility = volatility_non_volatile;
614 res = optimize_node(res);
615 IRN_VRFY_IRG(res, irg);
620 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
621 ir_node *store, ir_node *adr, ir_node *val)
629 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
630 res->attr.store.exc.pin_state = op_pin_state_pinned;
631 res->attr.store.volatility = volatility_non_volatile;
632 res = optimize_node(res);
633 IRN_VRFY_IRG(res, irg);
638 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
639 ir_node *size, type *alloc_type, where_alloc where)
646 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
647 res->attr.a.exc.pin_state = op_pin_state_pinned;
648 res->attr.a.where = where;
649 res->attr.a.type = alloc_type;
650 res = optimize_node(res);
651 IRN_VRFY_IRG(res, irg);
656 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
657 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
665 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
666 res->attr.f.where = where;
667 res->attr.f.type = free_type;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);
674 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
675 int arity, ir_node **in, entity *ent)
681 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
684 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
687 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
688 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
689 res->attr.s.ent = ent;
690 res = optimize_node(res);
691 IRN_VRFY_IRG(res, irg);
696 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
697 ir_node *objptr, type *ent)
704 NEW_ARR_A(ir_node *, r_in, r_arity);
708 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
709 res->attr.io.ent = ent;
711 /* res = optimize(res); */
712 IRN_VRFY_IRG(res, irg);
717 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind, type *tp) {
722 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
727 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
729 res->attr.i.num = symkind;
730 res->attr.i.sym = value;
733 res = optimize_node(res);
734 IRN_VRFY_IRG(res, irg);
739 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
740 symconst_kind symkind)
742 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
746 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
747 symconst_symbol sym = {(type *)symbol};
748 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
751 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
752 symconst_symbol sym = {(type *)symbol};
753 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
756 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
757 symconst_symbol sym = {symbol};
758 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
761 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
762 symconst_symbol sym = {symbol};
763 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
767 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
771 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
772 res = optimize_node(res);
773 IRN_VRFY_IRG(res, irg);
778 new_rd_Bad (ir_graph *irg)
784 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
786 ir_node *in[2], *res;
790 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
791 res->attr.confirm_cmp = cmp;
792 res = optimize_node (res);
793 IRN_VRFY_IRG(res, irg);
798 new_rd_Unknown (ir_graph *irg, ir_mode *m)
800 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
804 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
809 in[0] = get_Call_ptr(call);
810 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
811 /* res->attr.callbegin.irg = irg; */
812 res->attr.callbegin.call = call;
813 res = optimize_node(res);
814 IRN_VRFY_IRG(res, irg);
819 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
823 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
825 IRN_VRFY_IRG(res, irg);
830 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
834 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
835 irg->end_except = res;
836 IRN_VRFY_IRG (res, irg);
841 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
845 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
846 res = optimize_node(res);
847 IRN_VRFY_IRG(res, irg);
852 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
857 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
858 res->attr.filter.proj = proj;
859 res->attr.filter.in_cg = NULL;
860 res->attr.filter.backedge = NULL;
863 assert(get_Proj_pred(res));
864 assert(get_nodes_block(get_Proj_pred(res)));
866 res = optimize_node(res);
867 IRN_VRFY_IRG(res, irg);
872 new_rd_NoMem (ir_graph *irg) {
877 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
878 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
887 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
890 res = optimize_node(res);
891 IRN_VRFY_IRG(res, irg);
896 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
897 return new_rd_Block(NULL, irg, arity, in);
899 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
900 return new_rd_Start(NULL, irg, block);
902 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
903 return new_rd_End(NULL, irg, block);
905 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
906 return new_rd_Jmp(NULL, irg, block);
908 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
909 return new_rd_Cond(NULL, irg, block, c);
911 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
912 ir_node *store, int arity, ir_node **in) {
913 return new_rd_Return(NULL, irg, block, store, arity, in);
915 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
916 ir_node *store, ir_node *obj) {
917 return new_rd_Raise(NULL, irg, block, store, obj);
919 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
920 ir_mode *mode, tarval *con) {
921 return new_rd_Const(NULL, irg, block, mode, con);
924 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
925 ir_mode *mode, long value) {
926 return new_rd_Const_long(NULL, irg, block, mode, value);
930 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
931 symconst_symbol value, symconst_kind symkind) {
932 return new_rd_SymConst(NULL, irg, block, value, symkind);
934 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
935 ir_node *objptr, int n_index, ir_node **index,
937 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
939 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
941 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
943 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
944 ir_node *callee, int arity, ir_node **in,
946 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
948 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
949 ir_node *op1, ir_node *op2, ir_mode *mode) {
950 return new_rd_Add(NULL, irg, block, op1, op2, mode);
952 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
953 ir_node *op1, ir_node *op2, ir_mode *mode) {
954 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
956 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_mode *mode) {
958 return new_rd_Minus(NULL, irg, block, op, mode);
960 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
961 ir_node *op1, ir_node *op2, ir_mode *mode) {
962 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
964 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
965 ir_node *memop, ir_node *op1, ir_node *op2) {
966 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
968 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
969 ir_node *memop, ir_node *op1, ir_node *op2) {
970 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
972 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
973 ir_node *memop, ir_node *op1, ir_node *op2) {
974 return new_rd_Div(NULL, irg, block, memop, op1, op2);
976 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
977 ir_node *memop, ir_node *op1, ir_node *op2) {
978 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
980 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
981 ir_node *op, ir_mode *mode) {
982 return new_rd_Abs(NULL, irg, block, op, mode);
984 ir_node *new_r_And (ir_graph *irg, ir_node *block,
985 ir_node *op1, ir_node *op2, ir_mode *mode) {
986 return new_rd_And(NULL, irg, block, op1, op2, mode);
988 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
989 ir_node *op1, ir_node *op2, ir_mode *mode) {
990 return new_rd_Or(NULL, irg, block, op1, op2, mode);
992 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
993 ir_node *op1, ir_node *op2, ir_mode *mode) {
994 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
996 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
997 ir_node *op, ir_mode *mode) {
998 return new_rd_Not(NULL, irg, block, op, mode);
1000 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1001 ir_node *op1, ir_node *op2) {
1002 return new_rd_Cmp(NULL, irg, block, op1, op2);
1004 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1005 ir_node *op, ir_node *k, ir_mode *mode) {
1006 return new_rd_Shl(NULL, irg, block, op, k, mode);
1008 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1009 ir_node *op, ir_node *k, ir_mode *mode) {
1010 return new_rd_Shr(NULL, irg, block, op, k, mode);
1012 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1013 ir_node *op, ir_node *k, ir_mode *mode) {
1014 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1016 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1017 ir_node *op, ir_node *k, ir_mode *mode) {
1018 return new_rd_Rot(NULL, irg, block, op, k, mode);
1020 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1021 ir_node *op, ir_mode *mode) {
1022 return new_rd_Conv(NULL, irg, block, op, mode);
1024 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1025 return new_rd_Cast(NULL, irg, block, op, to_tp);
1027 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1028 ir_node **in, ir_mode *mode) {
1029 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1031 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1032 ir_node *store, ir_node *adr, ir_mode *mode) {
1033 return new_rd_Load(NULL, irg, block, store, adr, mode);
1035 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1036 ir_node *store, ir_node *adr, ir_node *val) {
1037 return new_rd_Store(NULL, irg, block, store, adr, val);
1039 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1040 ir_node *size, type *alloc_type, where_alloc where) {
1041 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1043 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1044 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1045 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1047 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1048 return new_rd_Sync(NULL, irg, block, arity, in);
1050 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1051 ir_mode *mode, long proj) {
1052 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1054 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1056 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1058 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1059 int arity, ir_node **in) {
1060 return new_rd_Tuple(NULL, irg, block, arity, in );
1062 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1063 ir_node *val, ir_mode *mode) {
1064 return new_rd_Id(NULL, irg, block, val, mode);
1066 ir_node *new_r_Bad (ir_graph *irg) {
1067 return new_rd_Bad(irg);
1069 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1070 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1072 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1073 return new_rd_Unknown(irg, m);
1075 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1076 return new_rd_CallBegin(NULL, irg, block, callee);
1078 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1079 return new_rd_EndReg(NULL, irg, block);
1081 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1082 return new_rd_EndExcept(NULL, irg, block);
1084 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1085 return new_rd_Break(NULL, irg, block);
1087 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1088 ir_mode *mode, long proj) {
1089 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1091 ir_node *new_r_NoMem (ir_graph *irg) {
1092 return new_rd_NoMem(irg);
1094 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1095 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1096 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1100 /** ********************/
1101 /** public interfaces */
1102 /** construction tools */
1106 * - create a new Start node in the current block
1108 * @return s - pointer to the created Start node
1113 new_d_Start (dbg_info* db)
1117 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1118 op_Start, mode_T, 0, NULL);
1119 /* res->attr.start.irg = current_ir_graph; */
1121 res = optimize_node(res);
1122 IRN_VRFY_IRG(res, current_ir_graph);
1127 new_d_End (dbg_info* db)
1130 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1131 op_End, mode_X, -1, NULL);
1132 res = optimize_node(res);
1133 IRN_VRFY_IRG(res, current_ir_graph);
1138 /* Constructs a Block with a fixed number of predecessors.
1139 Does set current_block. Can be used with automatic Phi
1140 node construction. */
1142 new_d_Block (dbg_info* db, int arity, ir_node **in)
1146 bool has_unknown = false;
1148 res = new_rd_Block(db, current_ir_graph, arity, in);
1150 /* Create and initialize array for Phi-node construction. */
1151 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1152 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1153 current_ir_graph->n_loc);
1154 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1157 for (i = arity-1; i >= 0; i--)
1158 if (get_irn_op(in[i]) == op_Unknown) {
1163 if (!has_unknown) res = optimize_node(res);
1164 current_ir_graph->current_block = res;
1166 IRN_VRFY_IRG(res, current_ir_graph);
1171 /* ***********************************************************************/
1172 /* Methods necessary for automatic Phi node creation */
1174 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1175 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1176 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1177 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1179   Call Graph:   ( A ---> B == A "calls" B)
1181   get_value calls get_r_value_internal; mature_immBlock calls phi_merge.
1189   get_r_value_internal and phi_merge call each other recursively; they
1193   allocate nodes via new_rd_Phi0 and new_rd_Phi_in, respectively.
1195 * *************************************************************************** */
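/* A minimal usage sketch (illustration only; new_immBlock, add_immBlock_pred,
   get_value, set_value and new_Jmp are assumed from the convenience interface
   declared in ircons.h) showing how a frontend drives this machinery:

     ir_node *jmp, *next, *val;

     set_value(pos, some_node);
     jmp  = new_Jmp();
     next = new_immBlock();
     add_immBlock_pred(next, jmp);
     val  = get_value(pos, mode_Is);
     ...
     mature_immBlock(next);

   get_value() in a block that does not define the value itself ends up in
   get_r_value_internal()/phi_merge() below; if the block is still immature
   this yields a Phi0 that mature_immBlock() later completes. */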
1197 /** Creates a Phi node with 0 predecessors */
1198 static INLINE ir_node *
1199 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1203 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1204 IRN_VRFY_IRG(res, irg);
1208 /* There are two implementations of the Phi node construction. The first
1209 is faster, but does not work for blocks with more than 2 predecessors.
1210    The second always works but is slower and causes more unnecessary Phi
1212    Select the implementation by the following preprocessor flag set in
1214 #if USE_FAST_PHI_CONSTRUCTION
1216 /* This is a stack used for allocating and deallocating nodes in
1217 new_rd_Phi_in. The original implementation used the obstack
1218    to model this stack; now it is explicit.  This reduces side effects.
1220 #if USE_EXPLICIT_PHI_IN_STACK
1222 new_Phi_in_stack(void) {
1225 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1227 res->stack = NEW_ARR_F (ir_node *, 0);
1234 free_Phi_in_stack(Phi_in_stack *s) {
1235 DEL_ARR_F(s->stack);
1239 free_to_Phi_in_stack(ir_node *phi) {
1240 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1241 current_ir_graph->Phi_in_stack->pos)
1242 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1244 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1246 (current_ir_graph->Phi_in_stack->pos)++;
1249 static INLINE ir_node *
1250 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1251 int arity, ir_node **in) {
1253 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1254 int pos = current_ir_graph->Phi_in_stack->pos;
1258 /* We need to allocate a new node */
1259     res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);  /* no dbg_info available here */
1260 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1262 /* reuse the old node and initialize it again. */
1265 assert (res->kind == k_ir_node);
1266 assert (res->op == op_Phi);
1270 assert (arity >= 0);
1271 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1272 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1274 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1276 (current_ir_graph->Phi_in_stack->pos)--;
1280 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1282 /* Creates a Phi node with a given, fixed array **in of predecessors.
1283 If the Phi node is unnecessary, as the same value reaches the block
1284 through all control flow paths, it is eliminated and the value
1285 returned directly. This constructor is only intended for use in
1286 the automatic Phi node generation triggered by get_value or mature.
1287    The implementation is quite tricky and depends on the fact that
1288    the nodes are allocated on a stack:
1289    The in array contains predecessors and NULLs.  The NULLs appear
1290    if get_r_value_internal, which computed the predecessors, reached
1291    the same block on two paths.  In this case the same value reaches
1292    this block on both paths; there is no definition in between.  We need
1293    not allocate a Phi where these paths merge, but we have to communicate
1294    this fact to the caller.  This happens by returning a pointer to the
1295 node the caller _will_ allocate. (Yes, we predict the address. We can
1296 do so because the nodes are allocated on the obstack.) The caller then
1297 finds a pointer to itself and, when this routine is called again,
1300 static INLINE ir_node *
1301 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1304 ir_node *res, *known;
1306 /* Allocate a new node on the obstack. This can return a node to
1307 which some of the pointers in the in-array already point.
1308 Attention: the constructor copies the in array, i.e., the later
1309 changes to the array in this routine do not affect the
1310 constructed node! If the in array contains NULLs, there will be
1311 missing predecessors in the returned node. Is this a possible
1312 internal state of the Phi node generation? */
1313 #if USE_EXPLICIT_PHI_IN_STACK
1314 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1316 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1317 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1320 /* The in-array can contain NULLs. These were returned by
1321 get_r_value_internal if it reached the same block/definition on a
1322 second path. The NULLs are replaced by the node itself to
1323 simplify the test in the next loop. */
1324 for (i = 0; i < ins; ++i) {
1329 /* This loop checks whether the Phi has more than one predecessor.
1330 If so, it is a real Phi node and we break the loop. Else the Phi
1331 node merges the same definition on several paths and therefore is
1333 for (i = 0; i < ins; ++i) {
1334 if (in[i] == res || in[i] == known)
1343 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1345 #if USE_EXPLICIT_PHI_IN_STACK
1346 free_to_Phi_in_stack(res);
1348 edges_node_deleted(res, current_ir_graph);
1349 obstack_free(current_ir_graph->obst, res);
1353 res = optimize_node (res);
1354 IRN_VRFY_IRG(res, irg);
1357 /* return the pointer to the Phi node. This node might be deallocated! */
1362 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1365 allocates and returns this node. The routine called to allocate the
1366 node might optimize it away and return a real value, or even a pointer
1367 to a deallocated Phi node on top of the obstack!
1368 This function is called with an in-array of proper size. **/
1370 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1372 ir_node *prevBlock, *res;
1375 /* This loop goes to all predecessor blocks of the block the Phi node is in
1376 and there finds the operands of the Phi node by calling
1377 get_r_value_internal. */
1378 for (i = 1; i <= ins; ++i) {
1379 assert (block->in[i]);
1380 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1382 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1385 /* After collecting all predecessors into the array nin a new Phi node
1386 with these predecessors is created. This constructor contains an
1387 optimization: If all predecessors of the Phi node are identical it
1388 returns the only operand instead of a new Phi node. If the value
1389 passes two different control flow edges without being defined, and
1390 this is the second path treated, a pointer to the node that will be
1391 allocated for the first path (recursion) is returned. We already
1392 know the address of this node, as it is the next node to be allocated
1393 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1394 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1396   /* Now we know the value for "pos" and can enter it in the array with
1397      all known local variables.  Attention: this might be a pointer to
1398      a node that will be allocated later!!!  See new_rd_Phi_in.
1399      If this is called in mature, after some set_value in the same block,
1400      the proper value must not be overwritten:
1402      get_value    (makes Phi0, puts it into graph_arr)
1403 set_value (overwrites Phi0 in graph_arr)
1404 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1407 if (!block->attr.block.graph_arr[pos]) {
1408 block->attr.block.graph_arr[pos] = res;
1410 /* printf(" value already computed by %s\n",
1411 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1417 /* This function returns the last definition of a variable. In case
1418 this variable was last defined in a previous block, Phi nodes are
1419 inserted. If the part of the firm graph containing the definition
1420 is not yet constructed, a dummy Phi node is returned. */
1422 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1425 /* There are 4 cases to treat.
1427     1. The block is not mature and we visit it the first time.  We cannot
1428 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1429 predecessors is returned. This node is added to the linked list (field
1430 "link") of the containing block to be completed when this block is
1431 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1434 2. The value is already known in this block, graph_arr[pos] is set and we
1435 visit the block the first time. We can return the value without
1436 creating any new nodes.
1438 3. The block is mature and we visit it the first time. A Phi node needs
1439        to be created (phi_merge).  If the Phi is not needed, as all its
1440 operands are the same value reaching the block through different
1441 paths, it's optimized away and the value itself is returned.
1443 4. The block is mature, and we visit it the second time. Now two
1444 subcases are possible:
1445 * The value was computed completely the last time we were here. This
1446 is the case if there is no loop. We can return the proper value.
1447 * The recursion that visited this node and set the flag did not
1448 return yet. We are computing a value in a loop and need to
1449 break the recursion without knowing the result yet.
1450        @@@ strange case.  Straightforwardly we would create a Phi before
1451        starting the computation of its predecessors.  In this case we will
1452        find a Phi here in any case.  The problem is that this implementation
1453        only creates a Phi after computing the predecessors, so that it is
1454        hard to compute self-references of this Phi.  @@@
1455 There is no simple check for the second subcase. Therefore we check
1456 for a second visit and treat all such cases as the second subcase.
1457        Anyway, the basic situation is the same: we reached a block
1458 on two paths without finding a definition of the value: No Phi
1459 nodes are needed on both paths.
1460 We return this information "Two paths, no Phi needed" by a very tricky
1461 implementation that relies on the fact that an obstack is a stack and
1462 will return a node with the same address on different allocations.
1463 Look also at phi_merge and new_rd_phi_in to understand this.
1464 @@@ Unfortunately this does not work, see testprogram
1465 three_cfpred_example.
1469 /* case 4 -- already visited. */
1470 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1472 /* visited the first time */
1473 set_irn_visited(block, get_irg_visited(current_ir_graph));
1475 /* Get the local valid value */
1476 res = block->attr.block.graph_arr[pos];
1478 /* case 2 -- If the value is actually computed, return it. */
1479 if (res) return res;
1481 if (block->attr.block.matured) { /* case 3 */
1483 /* The Phi has the same amount of ins as the corresponding block. */
1484 int ins = get_irn_arity(block);
1486 NEW_ARR_A (ir_node *, nin, ins);
1488 /* Phi merge collects the predecessors and then creates a node. */
1489 res = phi_merge (block, pos, mode, nin, ins);
1491 } else { /* case 1 */
1492     /* The block is not mature, we don't know how many ins are needed.  A Phi
1493        with zero predecessors is created.  Such a Phi node is called a Phi0
1494        node.  (There is also an obsolete Phi0 opcode.)  The Phi0 is then added
1495        to the list of Phi0 nodes in this block to be matured by mature_immBlock
1497        The Phi0 has to remember the pos of its internal value.  If the real
1498 Phi is computed, pos is used to update the array with the local
1501 res = new_rd_Phi0 (current_ir_graph, block, mode);
1502 res->attr.phi0_pos = pos;
1503 res->link = block->link;
1507 /* If we get here, the frontend missed a use-before-definition error */
1510 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1511 assert (mode->code >= irm_F && mode->code <= irm_P);
1512 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1513 tarval_mode_null[mode->code]);
1516 /* The local valid value is available now. */
1517 block->attr.block.graph_arr[pos] = res;
1525 it starts the recursion. This causes an Id at the entry of
1526 every block that has no definition of the value! **/
1528 #if USE_EXPLICIT_PHI_IN_STACK
1530 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1531 void free_Phi_in_stack(Phi_in_stack *s) { }
1534 static INLINE ir_node *
1535 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1536 ir_node **in, int ins, ir_node *phi0)
1539 ir_node *res, *known;
1541 /* Allocate a new node on the obstack. The allocation copies the in
1543 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1544 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1546 /* This loop checks whether the Phi has more than one predecessor.
1547 If so, it is a real Phi node and we break the loop. Else the
1548 Phi node merges the same definition on several paths and therefore
1549 is not needed. Don't consider Bad nodes! */
1551 for (i=0; i < ins; ++i)
1555     in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */
1557     /* Optimize self-referencing Phis:  We can't yet detect them properly, as
1558        they still refer to the Phi0 they will replace.  So replace them right now. */
1559 if (phi0 && in[i] == phi0) in[i] = res;
1561 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1569 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1572 edges_node_deleted(res, current_ir_graph);
1573 obstack_free (current_ir_graph->obst, res);
1574 if (is_Phi(known)) {
1575       /* If pred is a Phi node we want to optimize it: If loops are matured in a bad
1576          order, an enclosing Phi node may become superfluous. */
1577 res = optimize_in_place_2(known);
1579 exchange(known, res);
1584     /* An undefined value, e.g., in unreachable code. */
1588 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1589 IRN_VRFY_IRG(res, irg);
1590 /* Memory Phis in endless loops must be kept alive.
1591 As we can't distinguish these easily we keep all of them alive. */
1592 if ((res->op == op_Phi) && (mode == mode_M))
1593 add_End_keepalive(irg->end, res);
1600 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1602 #if PRECISE_EXC_CONTEXT
1604 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1606 /* Construct a new frag_array for node n.
1607 Copy the content from the current graph_arr of the corresponding block:
1608 this is the current state.
1609 Set ProjM(n) as current memory state.
1610    Furthermore, the last entry in the graph_arr of the current block points to n.  This
1611 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1613 static INLINE ir_node ** new_frag_arr (ir_node *n)
1618 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1619 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1620 sizeof(ir_node *)*current_ir_graph->n_loc);
1622 /* turn off optimization before allocating Proj nodes, as res isn't
1624 opt = get_opt_optimize(); set_optimize(0);
1625 /* Here we rely on the fact that all frag ops have Memory as first result! */
1626 if (get_irn_op(n) == op_Call)
1627 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1629 assert((pn_Quot_M == pn_DivMod_M) &&
1630 (pn_Quot_M == pn_Div_M) &&
1631 (pn_Quot_M == pn_Mod_M) &&
1632 (pn_Quot_M == pn_Load_M) &&
1633 (pn_Quot_M == pn_Store_M) &&
1634 (pn_Quot_M == pn_Alloc_M) );
1635 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1639 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
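/* Worked illustration (a sketch): assume a block constructs two fragile ops,
   first a Load, then a Store.

     new_frag_arr(Load):  arr_L = copy of graph_arr; arr_L[0] = Proj M of Load;
                          then graph_arr[n_loc-1] = Load
     new_frag_arr(Store): arr_S = copy of graph_arr, hence arr_S[n_loc-1] == Load;
                          arr_S[0] = Proj M of Store; then graph_arr[n_loc-1] = Store

   This yields the chain  block -> Store -> Load  described above; set_frag_value()
   follows it to enter a value into every frag array that still holds NULL at pos. */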
1644 * returns the frag_arr from a node
1646 static INLINE ir_node **
1647 get_frag_arr (ir_node *n) {
1648 switch (get_irn_opcode(n)) {
1650 return n->attr.call.exc.frag_arr;
1652 return n->attr.a.exc.frag_arr;
1654 return n->attr.load.exc.frag_arr;
1656 return n->attr.store.exc.frag_arr;
1658 return n->attr.except.frag_arr;
1663 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1665 if (!frag_arr[pos]) frag_arr[pos] = val;
1666 if (frag_arr[current_ir_graph->n_loc - 1]) {
1667 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1668 assert(arr != frag_arr && "Endless recursion detected");
1669 set_frag_value(arr, pos, val);
1674 for (i = 0; i < 1000; ++i) {
1675 if (!frag_arr[pos]) {
1676 frag_arr[pos] = val;
1678 if (frag_arr[current_ir_graph->n_loc - 1]) {
1679 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1685 assert(0 && "potential endless recursion");
1690 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1694 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1696 frag_arr = get_frag_arr(cfOp);
1697 res = frag_arr[pos];
1699 if (block->attr.block.graph_arr[pos]) {
1700 /* There was a set_value after the cfOp and no get_value before that
1701 set_value. We must build a Phi node now. */
1702 if (block->attr.block.matured) {
1703 int ins = get_irn_arity(block);
1705 NEW_ARR_A (ir_node *, nin, ins);
1706 res = phi_merge(block, pos, mode, nin, ins);
1708 res = new_rd_Phi0 (current_ir_graph, block, mode);
1709 res->attr.phi0_pos = pos;
1710 res->link = block->link;
1714 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1715 but this should be better: (remove comment if this works) */
1716 /* It's a Phi, we can write this into all graph_arrs with NULL */
1717 set_frag_value(block->attr.block.graph_arr, pos, res);
1719 res = get_r_value_internal(block, pos, mode);
1720 set_frag_value(block->attr.block.graph_arr, pos, res);
1728 computes the predecessors for the real phi node, and then
1729 allocates and returns this node. The routine called to allocate the
1730 node might optimize it away and return a real value.
1731 This function must be called with an in-array of proper size. **/
1733 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1735 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1738 /* If this block has no value at pos create a Phi0 and remember it
1739 in graph_arr to break recursions.
1740      Else we must not set graph_arr, as a later value is already remembered there. */
1742 if (!block->attr.block.graph_arr[pos]) {
1743 if (block == get_irg_start_block(current_ir_graph)) {
1744       /* Collapsing to Bad tarvals is not a good idea.
1745          So we call a user-supplied routine here that deals with this case as
1746          appropriate for the given language.  Unfortunately the only help we can give
1747          here is the position.
1749          Even if all variables are defined before use, it can happen that
1750          we get to the start block if a Cond has been replaced by a tuple
1751          (bad, jmp).  In this case we call the function needlessly, possibly
1752          generating a spurious error.
1753 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1756 if (default_initialize_local_variable)
1757 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1759 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1760 /* We don't need to care about exception ops in the start block.
1761 There are none by definition. */
1762 return block->attr.block.graph_arr[pos];
1764 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1765 block->attr.block.graph_arr[pos] = phi0;
1766 #if PRECISE_EXC_CONTEXT
1767 if (get_opt_precise_exc_context()) {
1768 /* Set graph_arr for fragile ops. Also here we should break recursion.
1769            We could choose a cyclic path through a cfop.  But the recursion would
1770 break at some point. */
1771 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1777 /* This loop goes to all predecessor blocks of the block the Phi node
1778 is in and there finds the operands of the Phi node by calling
1779 get_r_value_internal. */
1780 for (i = 1; i <= ins; ++i) {
1781 prevCfOp = skip_Proj(block->in[i]);
1783 if (is_Bad(prevCfOp)) {
1784 /* In case a Cond has been optimized we would get right to the start block
1785 with an invalid definition. */
1786 nin[i-1] = new_Bad();
1789 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1791 if (!is_Bad(prevBlock)) {
1792 #if PRECISE_EXC_CONTEXT
1793 if (get_opt_precise_exc_context() &&
1794 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1795 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1796 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1799 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1801 nin[i-1] = new_Bad();
1805 /* We want to pass the Phi0 node to the constructor: this finds additional
1806 optimization possibilities.
1807      The Phi0 node is either allocated in this function, or it comes from
1808      an earlier call to get_r_value_internal.  In this case we may not yet
1809 exchange phi0, as this is done in mature_immBlock. */
1811 phi0_all = block->attr.block.graph_arr[pos];
1812 if (!((get_irn_op(phi0_all) == op_Phi) &&
1813 (get_irn_arity(phi0_all) == 0) &&
1814 (get_nodes_block(phi0_all) == block)))
1820 /* After collecting all predecessors into the array nin a new Phi node
1821 with these predecessors is created. This constructor contains an
1822 optimization: If all predecessors of the Phi node are identical it
1823 returns the only operand instead of a new Phi node. */
1824 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1826 /* In case we allocated a Phi0 node at the beginning of this procedure,
1827 we need to exchange this Phi0 with the real Phi. */
1829 exchange(phi0, res);
1830 block->attr.block.graph_arr[pos] = res;
1831 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1832 only an optimization. */
1838 /* This function returns the last definition of a variable. In case
1839 this variable was last defined in a previous block, Phi nodes are
1840 inserted. If the part of the firm graph containing the definition
1841 is not yet constructed, a dummy Phi node is returned. */
1843 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1846 /* There are 4 cases to treat.
1848     1. The block is not mature and we visit it the first time.  We cannot
1849        create a proper Phi node, therefore a Phi0, i.e., a Phi without
1850        predecessors is returned.  This node is added to the linked list (field
1851        "link") of the containing block to be completed when this block is
1852        matured.  (Completion will add a new Phi and turn the Phi0 into an Id
1855 2. The value is already known in this block, graph_arr[pos] is set and we
1856 visit the block the first time. We can return the value without
1857 creating any new nodes.
1859 3. The block is mature and we visit it the first time. A Phi node needs
1860        to be created (phi_merge).  If the Phi is not needed, as all its
1861 operands are the same value reaching the block through different
1862 paths, it's optimized away and the value itself is returned.
1864 4. The block is mature, and we visit it the second time. Now two
1865 subcases are possible:
1866 * The value was computed completely the last time we were here. This
1867 is the case if there is no loop. We can return the proper value.
1868 * The recursion that visited this node and set the flag did not
1869 return yet. We are computing a value in a loop and need to
1870 break the recursion. This case only happens if we visited
1871 the same block with phi_merge before, which inserted a Phi0.
1872 So we return the Phi0.
1875 /* case 4 -- already visited. */
1876 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1877   /* As phi_merge allocates a Phi0 this value is always defined.  Here
1878      is the critical difference between the two algorithms. */
1879 assert(block->attr.block.graph_arr[pos]);
1880 return block->attr.block.graph_arr[pos];
1883 /* visited the first time */
1884 set_irn_visited(block, get_irg_visited(current_ir_graph));
1886 /* Get the local valid value */
1887 res = block->attr.block.graph_arr[pos];
1889 /* case 2 -- If the value is actually computed, return it. */
1890 if (res) { return res; };
1892 if (block->attr.block.matured) { /* case 3 */
1894 /* The Phi has the same amount of ins as the corresponding block. */
1895 int ins = get_irn_arity(block);
1897 NEW_ARR_A (ir_node *, nin, ins);
1899 /* Phi merge collects the predecessors and then creates a node. */
1900 res = phi_merge (block, pos, mode, nin, ins);
1902 } else { /* case 1 */
1903     /* The block is not mature, we don't know how many ins are needed.  A Phi
1904        with zero predecessors is created.  Such a Phi node is called a Phi0
1905        node.  The Phi0 is then added to the list of Phi0 nodes in this block
1906        to be matured by mature_immBlock later.
1907        The Phi0 has to remember the pos of its internal value.  If the real
1908 Phi is computed, pos is used to update the array with the local
1910 res = new_rd_Phi0 (current_ir_graph, block, mode);
1911 res->attr.phi0_pos = pos;
1912 res->link = block->link;
1916 /* If we get here, the frontend missed a use-before-definition error */
1919 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1920 assert (mode->code >= irm_F && mode->code <= irm_P);
1921 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1922 get_mode_null(mode));
1925 /* The local valid value is available now. */
1926 block->attr.block.graph_arr[pos] = res;
1931 #endif /* USE_FAST_PHI_CONSTRUCTION */
1933 /* ************************************************************************** */
1935 /** Finalize a Block node, when all control flows are known. */
1936 /** Acceptable parameters are only Block nodes. */
1938 mature_immBlock (ir_node *block)
1945 assert (get_irn_opcode(block) == iro_Block);
1946 /* @@@ should be commented in
1947 assert (!get_Block_matured(block) && "Block already matured"); */
1949 if (!get_Block_matured(block)) {
1950 ins = ARR_LEN (block->in)-1;
1951 /* Fix block parameters */
1952 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1954 /* An array for building the Phi nodes. */
1955 NEW_ARR_A (ir_node *, nin, ins);
1957 /* Traverse a chain of Phi nodes attached to this block and mature
1959 for (n = block->link; n; n=next) {
1960 inc_irg_visited(current_ir_graph);
1962 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1965 block->attr.block.matured = 1;
1967 /* Now, as the block is a finished firm node, we can optimize it.
1968 Since other nodes have been allocated since the block was created
1969      we cannot free the node on the obstack.  Therefore we have to call
1971 Unfortunately the optimization does not change a lot, as all allocated
1972 nodes refer to the unoptimized node.
1973 We can call _2, as global cse has no effect on blocks. */
1974 block = optimize_in_place_2(block);
1975 IRN_VRFY_IRG(block, current_ir_graph);
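/* Usage sketch for immature blocks (illustration only; new_immBlock,
   add_immBlock_pred and the jump nodes below are assumptions, not taken from
   this file): a loop header whose back edge is known only after the loop body
   has been built.

     ir_node *header;

     header = new_immBlock();
     add_immBlock_pred(header, entry_jmp);
       ... build the loop body; values read in the header may create
           Phi0 placeholders on its link list ...
     add_immBlock_pred(header, backedge_jmp);
     mature_immBlock(header);

   mature_immBlock() fixes the arity and exchanges every pending Phi0 for a
   proper Phi built with phi_merge(). */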
1980 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1982 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1987 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1989 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1994 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
1996 return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
2000 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2002 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2008 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2010 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2015 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2017 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
2022 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2025 assert(arg->op == op_Cond);
2026 arg->attr.c.kind = fragmentary;
2027 arg->attr.c.default_proj = max_proj;
2028 res = new_Proj (arg, mode_X, max_proj);
2033 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2035 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2040 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2042 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2046 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2048 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2053 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2055 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2060 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2062 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2068 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2070 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2075 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2077 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2082 * allocate the frag array
2084 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2085 if (get_opt_precise_exc_context()) {
2086 if ((current_ir_graph->phase_state == phase_building) &&
2087 (get_irn_op(res) == op) && /* Could be optimized away. */
2088 !*frag_store) /* Could be a cse where the arr is already set. */ {
2089 *frag_store = new_frag_arr(res);
ir_node *
new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
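
/* Illustrative sketch, not part of the library: how a front end typically
   uses one of the memory operations above.  The quotient and the new memory
   state are unpacked with Proj nodes; pn_Div_M and pn_Div_res are assumed to
   be the projection numbers declared in irnode.h, example_build_div is a
   hypothetical helper. */
#if 0
static ir_node *example_build_div(ir_node *op1, ir_node *op2) {
  ir_node *mem = get_store();                    /* current memory state      */
  ir_node *div = new_Div(mem, op1, op2);         /* Div is a memory operation */
  set_store(new_Proj(div, mode_M, pn_Div_M));    /* thread the new memory     */
  return new_Proj(div, mode_Is, pn_Div_res);     /* the integer quotient      */
}
#endif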
ir_node *
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And (db, current_ir_graph, current_ir_graph->current_block, op1, op2, mode);
}

ir_node *
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block, op1, op2, mode);
}

ir_node *
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block, op1, op2, mode);
}

ir_node *
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block, op, mode);
}

ir_node *
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block, op, k, mode);
}

ir_node *
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block, op, k, mode);
}

ir_node *
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block, op, k, mode);
}

ir_node *
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block, op, k, mode);
}

ir_node *
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block, op, mode);
}

ir_node *
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2) {
  return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block, op1, op2);
}

ir_node *
new_d_Jmp (dbg_info* db) {
  return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *
new_d_Cond (dbg_info* db, ir_node *c) {
  return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
}
ir_node *
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
            type *tp) {
  ir_node *res;
  res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
                     store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in) {
  return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
                        store, arity, in);
}

ir_node *
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj) {
  return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
                       store, obj);
}

ir_node *
new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode) {
  ir_node *res;
  res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
                     store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) {
  ir_node *res;
  res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
                      store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
             where_alloc where) {
  ir_node *res;
  res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
                      store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
            ir_node *size, type *free_type, where_alloc where) {
  return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
                      store, ptr, size, free_type, where);
}
ir_node *
new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
                     store, objptr, 0, NULL, ent);
}

ir_node *
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel) {
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
                     store, objptr, n_index, index, sel);
}

ir_node *
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent) {
  return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
                         store, objptr, ent));
}

ir_node *
new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp) {
  return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
                               value, kind, tp);
}

ir_node *
new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind) {
  return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
                          value, kind);
}

ir_node *
new_d_Sync (dbg_info* db, int arity, ir_node** in) {
  return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block, arity, in);
}
ir_node *
(new_d_Bad)(void) {
  return _new_d_Bad();
}
ir_node *
new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
                         val, bound, cmp);
}

ir_node *
new_d_Unknown (ir_mode *m) {
  return new_rd_Unknown(current_ir_graph, m);
}

ir_node *
new_d_CallBegin (dbg_info *db, ir_node *call) {
  ir_node *res;
  res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
  return res;
}

ir_node *
new_d_EndReg (dbg_info *db) {
  ir_node *res;
  res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *
new_d_EndExcept (dbg_info *db) {
  ir_node *res;
  res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *
new_d_Break (dbg_info *db) {
  return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
                        arg, mode, proj);
}
ir_node *
(new_d_NoMem)(void) {
  return _new_d_NoMem();
}
ir_node *
new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
           ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
                     sel, ir_false, ir_true, mode);
}
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Uses also constructors of ?? interface, except new_Block.            */
/* ********************************************************************* */
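
/* Illustrative sketch, not part of the library: how a front end might use
   this interface to translate   if (a > 0) x = a; else x = -a;
   It assumes variable x lives in local slot 1 and uses the usual projection
   numbers pn_Cmp_Gt, pn_Cond_true and pn_Cond_false from irnode.h;
   example_if_then_else is a hypothetical helper. */
#if 0
static void example_if_then_else(ir_node *a) {
  ir_node *cmp, *cond, *proj_t, *proj_f, *blk, *jmp_t, *jmp_f;

  cmp    = new_Cmp(a, new_Const_long(mode_Is, 0));
  cond   = new_Cond(new_Proj(cmp, mode_b, pn_Cmp_Gt));
  proj_t = new_Proj(cond, mode_X, pn_Cond_true);
  proj_f = new_Proj(cond, mode_X, pn_Cond_false);

  blk = new_immBlock();                      /* then part: x = a  */
  add_immBlock_pred(blk, proj_t);
  mature_immBlock(blk);
  set_value(1, a);
  jmp_t = new_Jmp();

  blk = new_immBlock();                      /* else part: x = -a */
  add_immBlock_pred(blk, proj_f);
  mature_immBlock(blk);
  set_value(1, new_Minus(a, mode_Is));
  jmp_f = new_Jmp();

  blk = new_immBlock();                      /* join block        */
  add_immBlock_pred(blk, jmp_t);
  add_immBlock_pred(blk, jmp_f);
  mature_immBlock(blk);
  /* get_value(1, mode_Is) now yields a Phi merging both definitions. */
}
#endif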
/** Block construction **/
/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
  ir_node *res;

  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  res->attr.block.dead        = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                         current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}

ir_node *
new_immBlock (void) {
  return new_d_immBlock(NULL);
}
/* add an edge to a jmp/control flow node */
void
add_immBlock_pred (ir_node *block, ir_node *jmp)
{
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}

/* changing the current block */
void
set_cur_block (ir_node *target)
{
  current_ir_graph->current_block = target;
}
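
/* Illustrative sketch, not part of the library: temporarily redirecting
   construction to another block with get_cur_block()/set_cur_block().
   'other_block' is a hypothetical, already created block and
   example_switch_block a hypothetical helper. */
#if 0
static void example_switch_block(ir_node *other_block, ir_node *op) {
  ir_node *saved = get_cur_block();
  set_cur_block(other_block);
  (void)new_Conv(op, mode_Is);   /* this Conv is placed in other_block      */
  set_cur_block(saved);          /* resume construction where we left off   */
}
#endif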
/* ************************ */
/* parameter administration */

/* get a value from the parameter array from the current block by its index */
ir_node *
get_d_value (dbg_info* db, int pos, ir_mode *mode)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}

/* get a value from the parameter array from the current block by its index */
ir_node *
get_value (int pos, ir_mode *mode)
{
  return get_d_value(NULL, pos, mode);
}

/* set a value at position pos in the parameter array from the current block */
void
set_value (int pos, ir_node *value)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}
/* get the current store */
ir_node *
get_store (void)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
}

/* set the current store */
void
set_store (ir_node *store)
{
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
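
/* Illustrative sketch, not part of the library: threading the memory state
   through a Load with get_store()/set_store().  pn_Load_M and pn_Load_res
   are assumed to be the projection numbers declared in irnode.h;
   example_load_int is a hypothetical helper. */
#if 0
static ir_node *example_load_int(ir_node *addr) {
  ir_node *load = new_Load(get_store(), addr, mode_Is);
  set_store(new_Proj(load, mode_M, pn_Load_M));    /* new memory state */
  return new_Proj(load, mode_Is, pn_Load_res);     /* the loaded value */
}
#endif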
void
keep_alive (ir_node *ka)
{
  add_End_keepalive(current_ir_graph->end, ka);
}
/** Useful access routines **/
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block(void) {
  return get_irg_current_block(current_ir_graph);
}

/* Returns the frame type of the current graph */
type *get_cur_frame_type(void) {
  return get_irg_frame_type(current_ir_graph);
}
/* ********************************************************************* */

/* call once for each run of the library */
void
init_cons(uninitialized_local_variable_func_t *func)
{
  default_initialize_local_variable = func;
}

/* call for each graph */
void
finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}
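
/* Illustrative sketch, not part of the library: the per-graph construction
   protocol as a whole, modelled after the Firm test programs.  'my_method'
   and the number of local variables are placeholders; new_ir_graph(),
   get_irg_current_block() and get_irg_end_block() are declared in irgraph.h;
   example_build_method is a hypothetical helper. */
#if 0
static void example_build_method(entity *my_method) {
  ir_graph *irg = new_ir_graph(my_method, 2 /* number of local variables */);
  ir_node  *ret;

  /* ... build the body with the comfortable constructors, using
     get_value()/set_value() and get_store()/set_store() ... */

  ret = new_Return(get_store(), 0, NULL);
  mature_immBlock(get_irg_current_block(irg));
  add_immBlock_pred(get_irg_end_block(irg), ret);
  mature_immBlock(get_irg_end_block(irg));
  finalize_cons(irg);
}
#endif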
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_Const_long(ir_mode *mode, long value)
{
  return new_d_Const_long(NULL, mode, value);
}
ir_node *new_Const_type(tarval *con, type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem (void) {
  return new_d_NoMem();
}
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
}