3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependent initialization variable
/* Frontend-supplied callback used to produce an initial value for an
   uninitialized local variable during SSA construction; NULL means no
   language-specific initializer is installed (presumably set via a
   setter elsewhere in this file -- TODO confirm). */
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
/* Allocates the Block node, marks it matured (its predecessor list is
   final) and clears the walker visited flag. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
/* Block attributes: owning graph, one backedge flag per predecessor
   (obstack-allocated), and interprocedural (call graph) predecessor
   data, which starts out absent. */
72 res->attr.block.irg = irg;
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T, no predecessors) in the given block. */
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
/* Constructs an End node. Arity -1: predecessors (keep-alive edges)
   are added dynamically, see add_End_keepalive. */
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* One backedge flag per Phi operand, allocated on the graph's obstack. */
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan for Unknown operands; a Phi with Unknown inputs is not handed
   to the optimizer -- presumably because those inputs may still be
   replaced later (TODO confirm). */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* Constructs a Const node with the given tarval and attaches a type.
   NOTE(review): the `block' parameter is ignored -- the node is always
   placed in the graph's start block (see the irg->start_block argument
   below). Confirm this is intentional. */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
/* Constructs a Const node, deriving the type from the tarval: entity
   tarvals get a pointer-to-entity-type, everything else unknown_type. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 if (tarval_is_entity(con))
159 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
161 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Id node: passes `val' through unchanged; normally folded away by
   optimize_node below. */
165 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
169 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
170 res = optimize_node(res);
171 IRN_VRFY_IRG(res, irg);
/* Proj node: selects result number `proj' from a tuple-producing node. */
176 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
181 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
182 res->attr.proj = proj;
185 assert(get_Proj_pred(res));
186 assert(get_nodes_Block(get_Proj_pred(res)));
188 res = optimize_node(res);
190 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond as fragmentary and records
   max_proj as its default projection before building an X-mode Proj. */
196 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
200 assert(arg->op == op_Cond);
201 arg->attr.c.kind = fragmentary;
202 arg->attr.c.default_proj = max_proj;
203 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv node: converts `op' to the given mode. */
208 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
212 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
213 res = optimize_node(res);
214 IRN_VRFY_IRG(res, irg);
/* Cast node: reinterprets `op' as type to_tp; the mode is unchanged. */
219 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
223 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
224 res->attr.cast.totype = to_tp;
225 res = optimize_node(res);
226 IRN_VRFY_IRG(res, irg);
/* Tuple node: groups `arity' values into a single mode_T node. */
231 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
235 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
236 res = optimize_node (res);
237 IRN_VRFY_IRG(res, irg);
/* Arithmetic and logic constructors. They all follow the same pattern:
   build the node, run local optimization (constant folding, CSE, ...)
   via optimize_node, then verify. The division family (Quot, DivMod,
   Div, Mod) additionally takes a memory operand and yields a mode_T
   tuple, its concrete results being selected by Proj nodes. */
242 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
243 ir_node *op1, ir_node *op2, ir_mode *mode)
250 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
251 res = optimize_node(res);
252 IRN_VRFY_IRG(res, irg);
257 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
258 ir_node *op1, ir_node *op2, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
266 res = optimize_node (res);
267 IRN_VRFY_IRG(res, irg);
272 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op, ir_mode *mode)
277 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
278 res = optimize_node(res);
279 IRN_VRFY_IRG(res, irg);
284 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
285 ir_node *op1, ir_node *op2, ir_mode *mode)
292 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
293 res = optimize_node(res);
294 IRN_VRFY_IRG(res, irg);
299 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
300 ir_node *memop, ir_node *op1, ir_node *op2)
308 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
309 res = optimize_node(res);
310 IRN_VRFY_IRG(res, irg);
315 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
325 res = optimize_node(res);
326 IRN_VRFY_IRG(res, irg);
331 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
340 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
347 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *memop, ir_node *op1, ir_node *op2)
356 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);
363 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
364 ir_node *op1, ir_node *op2, ir_mode *mode)
371 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
372 res = optimize_node(res);
373 IRN_VRFY_IRG(res, irg);
378 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
379 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
387 res = optimize_node(res);
388 IRN_VRFY_IRG(res, irg);
393 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op1, ir_node *op2, ir_mode *mode)
401 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
402 res = optimize_node (res);
403 IRN_VRFY_IRG(res, irg);
408 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
409 ir_node *op, ir_mode *mode)
413 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
414 res = optimize_node(res);
415 IRN_VRFY_IRG(res, irg);
/* Shift/rotate constructors: second operand `k' is the shift amount. */
420 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
421 ir_node *op, ir_node *k, ir_mode *mode)
428 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
429 res = optimize_node(res);
430 IRN_VRFY_IRG(res, irg);
435 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
436 ir_node *op, ir_node *k, ir_mode *mode)
443 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
444 res = optimize_node(res);
445 IRN_VRFY_IRG(res, irg);
450 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
451 ir_node *op, ir_node *k, ir_mode *mode)
458 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
459 res = optimize_node(res);
460 IRN_VRFY_IRG(res, irg);
465 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
466 ir_node *op, ir_node *k, ir_mode *mode)
473 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
474 res = optimize_node(res);
475 IRN_VRFY_IRG(res, irg);
480 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
481 ir_node *op, ir_mode *mode)
485 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
486 res = optimize_node (res);
487 IRN_VRFY_IRG(res, irg);
/* Cmp: compares two values; yields mode_T, the individual relations
   are selected via Proj nodes. */
492 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
493 ir_node *op1, ir_node *op2)
500 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
501 res = optimize_node(res);
502 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow (mode_X) out of `block'. */
507 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
511 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
512 res = optimize_node (res);
513 IRN_VRFY_IRG (res, irg);
/* Cond: conditional branch on `c'; starts out as a dense selector with
   default projection 0 (new_rd_defaultProj may change both later). */
518 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
522 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
523 res->attr.c.kind = dense;
524 res->attr.c.default_proj = 0;
525 res = optimize_node (res);
526 IRN_VRFY_IRG(res, irg);
/* Call: the real in-array holds the `arity' actual arguments starting
   at index 2, i.e. behind the memory and callee operands. */
531 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
532 ir_node *callee, int arity, ir_node **in, type *tp)
539 NEW_ARR_A(ir_node *, r_in, r_arity);
542 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
544 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
546 assert(is_method_type(tp));
547 set_Call_type(res, tp);
548 res->attr.call.callee_arr = NULL;
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
/* Return: the in-array is memory followed by the `arity' result
   values, which are copied in starting at index 1. */
555 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
556 ir_node *store, int arity, ir_node **in)
563 NEW_ARR_A (ir_node *, r_in, r_arity);
565 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
566 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
/* Raise: throws the exception object `obj'; operands are memory and
   the object. */
573 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
580 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
581 res = optimize_node(res);
582 IRN_VRFY_IRG(res, irg);
/* Load: memory + address; yields a mode_T tuple whose results are
   selected via Proj nodes. */
587 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
588 ir_node *store, ir_node *adr)
595 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
/* Store: memory + address + value to write. */
602 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
603 ir_node *store, ir_node *adr, ir_node *val)
611 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
/* Alloc: allocates an object of alloc_type with the given size;
   `where' selects the allocation place (stack/heap). */
618 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *size, type *alloc_type, where_alloc where)
626 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
627 res->attr.a.where = where;
628 res->attr.a.type = alloc_type;
629 res = optimize_node(res);
630 IRN_VRFY_IRG(res, irg);
/* Free: releases the object at `ptr' of type free_type. */
635 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
636 ir_node *ptr, ir_node *size, type *free_type)
644 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
645 res->attr.f = free_type;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
/* Sel: computes the address of entity `ent' relative to objptr; the
   index operands are copied in behind memory and objptr (index 2).
   Result mode is the machine pointer mode. */
652 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
653 int arity, ir_node **in, entity *ent)
659 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
662 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
665 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
666 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
667 res->attr.s.ent = ent;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);
/* InstOf: runtime type test of objptr against `ent'.
   NOTE(review): the node is created with op_Sel while its attributes
   are stored in attr.io, and optimization is commented out -- verify
   that the op_Sel opcode is intentional. */
674 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
675 ir_node *objptr, type *ent)
682 NEW_ARR_A(ir_node *, r_in, r_arity);
686 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
687 res->attr.io.ent = ent;
689 /* res = optimize(res); */
690 IRN_VRFY_IRG(res, irg);
/* SymConst with an explicit type: the result mode is chosen depending
   on the kind -- address kinds (addr_name/addr_ent) are distinguished
   from type_tag/size in the test below. */
695 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
696 symconst_kind symkind, type *tp)
701 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
705 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
707 res->attr.i.num = symkind;
708 res->attr.i.sym = value;
711 res = optimize_node(res);
712 IRN_VRFY_IRG(res, irg);
/* SymConst without an explicit type: delegates with unknown_type. */
717 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind)
720 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
/* Convenience wrappers placing kind-specific SymConsts in the start
   block. The (type *) casts merely fill the symconst_symbol union. */
724 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
725 symconst_symbol sym = {(type *)symbol};
726 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
729 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
730 symconst_symbol sym = {(type *)symbol};
731 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
734 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
735 symconst_symbol sym = {symbol};
736 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
739 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
740 symconst_symbol sym = {symbol};
741 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Sync: joins several memory values into a single mode_M value. */
745 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
749 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
/* Bad: returns the graph's Bad node. */
756 new_rd_Bad (ir_graph *irg)
/* Confirm: attaches the assertion `val cmp bound' to val; the result
   mode equals val's mode. */
762 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
764 ir_node *in[2], *res;
768 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
769 res->attr.confirm_cmp = cmp;
770 res = optimize_node (res);
771 IRN_VRFY_IRG(res, irg);
/* Unknown: placeholder for an undefined value of mode m; lives in the
   start block and is deliberately neither optimized nor verified. */
776 new_rd_Unknown (ir_graph *irg, ir_mode *m)
778 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* CallBegin: interprocedural-view marker; its single operand is the
   callee pointer of the associated Call node. */
782 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
787 in[0] = get_Call_ptr(call);
788 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
789 /* res->attr.callbegin.irg = irg; */
790 res->attr.callbegin.call = call;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
/* EndReg / EndExcept: interprocedural end nodes; arity -1 like End so
   predecessors can be added dynamically. EndExcept is additionally
   registered in the graph. */
797 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
801 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
803 IRN_VRFY_IRG(res, irg);
808 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
812 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
813 irg->end_except = res;
814 IRN_VRFY_IRG (res, irg);
/* Break: control flow node (mode_X) used in the interprocedural view. */
819 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
823 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj; its call-graph inputs and
   backedge data are filled in later. */
830 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
835 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
836 res->attr.filter.proj = proj;
837 res->attr.filter.in_cg = NULL;
838 res->attr.filter.backedge = NULL;
841 assert(get_Proj_pred(res));
842 assert(get_nodes_Block(get_Proj_pred(res)));
844 res = optimize_node(res);
845 IRN_VRFY_IRG(res, irg);
/* FuncCall: call without a memory operand; the arguments are copied in
   behind the callee at index 1 (compare new_rd_Call, which copies at
   index 2 because it also carries a store operand). */
851 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
852 ir_node *callee, int arity, ir_node **in, type *tp)
859 NEW_ARR_A(ir_node *, r_in, r_arity);
861 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
863 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
865 assert(is_method_type(tp));
866 set_FuncCall_type(res, tp);
867 res->attr.call.callee_arr = NULL;
868 res = optimize_node(res);
869 IRN_VRFY_IRG(res, irg);
/* The new_r_* interface: convenience wrappers that forward to the
   corresponding new_rd_* constructor with NULL debug info. They add no
   behavior of their own. */
874 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
875 return new_rd_Block(NULL, irg, arity, in);
877 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
878 return new_rd_Start(NULL, irg, block);
880 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
881 return new_rd_End(NULL, irg, block);
883 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
884 return new_rd_Jmp(NULL, irg, block);
886 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
887 return new_rd_Cond(NULL, irg, block, c);
889 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
890 ir_node *store, int arity, ir_node **in) {
891 return new_rd_Return(NULL, irg, block, store, arity, in);
893 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
894 ir_node *store, ir_node *obj) {
895 return new_rd_Raise(NULL, irg, block, store, obj);
897 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
898 ir_mode *mode, tarval *con) {
899 return new_rd_Const(NULL, irg, block, mode, con);
901 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
902 symconst_symbol value, symconst_kind symkind) {
903 return new_rd_SymConst(NULL, irg, block, value, symkind);
905 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
906 ir_node *objptr, int n_index, ir_node **index,
908 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
910 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
912 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
914 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
915 ir_node *callee, int arity, ir_node **in,
917 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
919 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
920 ir_node *op1, ir_node *op2, ir_mode *mode) {
921 return new_rd_Add(NULL, irg, block, op1, op2, mode);
923 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
924 ir_node *op1, ir_node *op2, ir_mode *mode) {
925 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
927 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
928 ir_node *op, ir_mode *mode) {
929 return new_rd_Minus(NULL, irg, block, op, mode);
931 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
932 ir_node *op1, ir_node *op2, ir_mode *mode) {
933 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
935 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
936 ir_node *memop, ir_node *op1, ir_node *op2) {
937 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
939 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
940 ir_node *memop, ir_node *op1, ir_node *op2) {
941 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
943 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
944 ir_node *memop, ir_node *op1, ir_node *op2) {
945 return new_rd_Div(NULL, irg, block, memop, op1, op2);
947 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
948 ir_node *memop, ir_node *op1, ir_node *op2) {
949 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
951 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
952 ir_node *op, ir_mode *mode) {
953 return new_rd_Abs(NULL, irg, block, op, mode);
955 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
956 ir_node *op1, ir_node *op2, ir_mode *mode) {
957 return new_rd_And(NULL, irg, block, op1, op2, mode);
959 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
960 ir_node *op1, ir_node *op2, ir_mode *mode) {
961 return new_rd_Or(NULL, irg, block, op1, op2, mode);
963 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
964 ir_node *op1, ir_node *op2, ir_mode *mode) {
965 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
967 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
968 ir_node *op, ir_mode *mode) {
969 return new_rd_Not(NULL, irg, block, op, mode);
971 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
972 ir_node *op1, ir_node *op2) {
973 return new_rd_Cmp(NULL, irg, block, op1, op2);
975 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
976 ir_node *op, ir_node *k, ir_mode *mode) {
977 return new_rd_Shl(NULL, irg, block, op, k, mode);
979 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
980 ir_node *op, ir_node *k, ir_mode *mode) {
981 return new_rd_Shr(NULL, irg, block, op, k, mode);
983 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
984 ir_node *op, ir_node *k, ir_mode *mode) {
985 return new_rd_Shrs(NULL, irg, block, op, k, mode);
987 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
988 ir_node *op, ir_node *k, ir_mode *mode) {
989 return new_rd_Rot(NULL, irg, block, op, k, mode);
991 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
992 ir_node *op, ir_mode *mode) {
993 return new_rd_Conv(NULL, irg, block, op, mode);
995 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
996 return new_rd_Cast(NULL, irg, block, op, to_tp);
998 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
999 ir_node **in, ir_mode *mode) {
1000 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1002 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1003 ir_node *store, ir_node *adr) {
1004 return new_rd_Load(NULL, irg, block, store, adr);
1006 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1007 ir_node *store, ir_node *adr, ir_node *val) {
1008 return new_rd_Store(NULL, irg, block, store, adr, val);
1010 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1011 ir_node *size, type *alloc_type, where_alloc where) {
1012 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1014 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1015 ir_node *ptr, ir_node *size, type *free_type) {
1016 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1018 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1019 return new_rd_Sync(NULL, irg, block, arity, in);
1021 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1022 ir_mode *mode, long proj) {
1023 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1025 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1027 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1029 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1030 int arity, ir_node **in) {
1031 return new_rd_Tuple(NULL, irg, block, arity, in );
1033 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1034 ir_node *val, ir_mode *mode) {
1035 return new_rd_Id(NULL, irg, block, val, mode);
1037 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1038 return new_rd_Bad(irg);
1040 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1041 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1043 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1044 return new_rd_Unknown(irg, m);
1046 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1047 return new_rd_CallBegin(NULL, irg, block, callee);
1049 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1050 return new_rd_EndReg(NULL, irg, block);
1052 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1053 return new_rd_EndExcept(NULL, irg, block);
1055 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1056 return new_rd_Break(NULL, irg, block);
1058 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1059 ir_mode *mode, long proj) {
1060 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1062 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1063 ir_node *callee, int arity, ir_node **in,
1065 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1069 /** ********************/
1070 /** public interfaces */
1071 /** construction tools */
1075 * - create a new Start node in the current block
1077 * @return s - pointer to the created Start node
/* Like new_rd_Start, but implicitly uses current_ir_graph and its
   current block. */
1082 new_d_Start (dbg_info* db)
1086 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1087 op_Start, mode_T, 0, NULL);
1088 /* res->attr.start.irg = current_ir_graph; */
1090 res = optimize_node(res);
1091 IRN_VRFY_IRG(res, current_ir_graph);
/* End node in the current block of current_ir_graph; arity -1 as in
   new_rd_End, keep-alive edges are added later. */
1096 new_d_End (dbg_info* db)
1099 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1100 op_End, mode_X, -1, NULL);
1101 res = optimize_node(res);
1102 IRN_VRFY_IRG(res, current_ir_graph);
1107 /* Constructs a Block with a fixed number of predecessors.
1108 Does set current_block. Can be used with automatic Phi
1109 node construction. */
1111 new_d_Block (dbg_info* db, int arity, ir_node **in)
1115 bool has_unknown = false;
1117 res = new_rd_Block(db, current_ir_graph, arity, in);
1119 /* Create and initialize array for Phi-node construction. */
/* graph_arr caches the current value of each of the graph's n_loc
   local variables for SSA construction; all slots start undefined. */
1120 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1121 current_ir_graph->n_loc);
1122 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* As in new_rd_Phi: a block with Unknown predecessors is not handed to
   the optimizer. */
1124 for (i = arity-1; i >= 0; i--)
1125 if (get_irn_op(in[i]) == op_Unknown) {
1130 if (!has_unknown) res = optimize_node(res);
/* This block becomes the construction target for subsequent new_d_*
   calls. */
1131 current_ir_graph->current_block = res;
1133 IRN_VRFY_IRG(res, current_ir_graph);
1138 /* ***********************************************************************/
1139 /* Methods necessary for automatic Phi node creation */
1141 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1142 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1143 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1144 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1146 Call Graph: ( A ---> B == A "calls" B)
1148 get_value mature_block
1156 get_r_value_internal |
1160 new_rd_Phi0 new_rd_Phi_in
1162 * *************************************************************************** */
1164 /** Creates a Phi node with 0 predecessors */
1165 static INLINE ir_node *
1166 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
/* Phi0 is a placeholder produced during SSA construction before the
   block's predecessors are known; it is verified but deliberately not
   optimized. */
1170 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1171 IRN_VRFY_IRG(res, irg);
1175 /* There are two implementations of the Phi node construction. The first
1176 is faster, but does not work for blocks with more than 2 predecessors.
1177 The second works always but is slower and causes more unnecessary Phi
1179 Select the implementations by the following preprocessor flag set in
1181 #if USE_FAST_PHI_CONSTRUCTION
1183 /* This is a stack used for allocating and deallocating nodes in
1184 new_rd_Phi_in. The original implementation used the obstack
1185 to model this stack, now it is explicit. This reduces side effects.
1187 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates the explicit stack used to recycle Phi nodes during SSA
   construction.
   NOTE(review): the malloc result is used without a NULL check --
   confirm the intended out-of-memory policy. */
1188 INLINE Phi_in_stack *
1189 new_Phi_in_stack(void) {
1192 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1194 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the flexible array backing the stack. */
1201 free_Phi_in_stack(Phi_in_stack *s) {
1202 DEL_ARR_F(s->stack);
/* Returns a Phi node to the recycling stack: append when the stack is
   exactly filled up to pos, otherwise overwrite the slot at pos; pos
   always advances. */
1206 free_to_Phi_in_stack(ir_node *phi) {
1207 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1208 current_ir_graph->Phi_in_stack->pos)
1209 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1211 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1213 (current_ir_graph->Phi_in_stack->pos)++;
/* Pops a recycled Phi node from the explicit stack if one is
   available, otherwise allocates a fresh one; either way the node is
   (re)initialized with the given operands. */
1216 static INLINE ir_node *
1217 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1218 int arity, ir_node **in) {
1220 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1221 int pos = current_ir_graph->Phi_in_stack->pos;
1225 /* We need to allocate a new node */
/* NOTE(review): `db' is not among this function's parameters -- verify
   where it is declared (or whether it should simply be NULL here). */
1226 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1227 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1229 /* reuse the old node and initialize it again. */
1232 assert (res->kind == k_ir_node);
1233 assert (res->op == op_Phi);
1237 assert (arity >= 0);
1238 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer, hence arity+1 slots and the
   operand copy starting at &res->in[1]. */
1239 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1241 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1243 (current_ir_graph->Phi_in_stack->pos)--;
1249 /* Creates a Phi node with a given, fixed array **in of predecessors.
1250 If the Phi node is unnecessary, as the same value reaches the block
1251 through all control flow paths, it is eliminated and the value
1252 returned directly. This constructor is only intended for use in
1253 the automatic Phi node generation triggered by get_value or mature.
1254 The implementation is quite tricky and depends on the fact, that
1255 the nodes are allocated on a stack:
1256 The in array contains predecessors and NULLs. The NULLs appear,
1257 if get_r_value_internal, that computed the predecessors, reached
1258 the same block on two paths. In this case the same value reaches
1259 this block on both paths, there is no definition in between. We need
1260 not allocate a Phi where these paths merge, but we have to communicate
1261 this fact to the caller. This happens by returning a pointer to the
1262 node the caller _will_ allocate. (Yes, we predict the address. We can
1263 do so because the nodes are allocated on the obstack.) The caller then
1264 finds a pointer to itself and, when this routine is called again,
/* Builds a Phi during SSA construction from a predecessor array that
   may contain NULLs (same value reached on several paths). If at most
   one distinct predecessor remains, the freshly built node is released
   again and that single value (or the predicted address of the node
   the caller will allocate) is returned instead. */
1267 static INLINE ir_node *
1268 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1271 ir_node *res, *known;
1273 /* Allocate a new node on the obstack. This can return a node to
1274 which some of the pointers in the in-array already point.
1275 Attention: the constructor copies the in array, i.e., the later
1276 changes to the array in this routine do not affect the
1277 constructed node! If the in array contains NULLs, there will be
1278 missing predecessors in the returned node. Is this a possible
1279 internal state of the Phi node generation? */
1280 #if USE_EXPLICIT_PHI_IN_STACK
1281 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1283 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1284 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1287 /* The in-array can contain NULLs. These were returned by
1288 get_r_value_internal if it reached the same block/definition on a
1289 second path. The NULLs are replaced by the node itself to
1290 simplify the test in the next loop. */
1291 for (i = 0; i < ins; ++i) {
1296 /* This loop checks whether the Phi has more than one predecessor.
1297 If so, it is a real Phi node and we break the loop. Else the Phi
1298 node merges the same definition on several paths and therefore is
1300 for (i = 0; i < ins; ++i)
1302 if (in[i] == res || in[i] == known) continue;
1310 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the unneeded node back: either onto the recycling stack or
   directly to the obstack (it is the topmost allocation). */
1312 #if USE_EXPLICIT_PHI_IN_STACK
1313 free_to_Phi_in_stack(res);
1315 obstack_free (current_ir_graph->obst, res);
1319 res = optimize_node (res);
1320 IRN_VRFY_IRG(res, irg);
1323 /* return the pointer to the Phi node. This node might be deallocated! */
1328 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1331 allocates and returns this node. The routine called to allocate the
1332 node might optimize it away and return a real value, or even a pointer
1333 to a deallocated Phi node on top of the obstack!
1334 This function is called with an in-array of proper size. **/
/* Collects, for local variable slot `pos', the reaching definition
   from every predecessor block and merges them via new_rd_Phi_in. */
1336 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1338 ir_node *prevBlock, *res;
1341 /* This loop goes to all predecessor blocks of the block the Phi node is in
1342 and there finds the operands of the Phi node by calling
1343 get_r_value_internal. */
/* Predecessor edges are 1-based (slot 0 is reserved), hence i runs
   from 1 to ins while nin is filled 0-based. */
1344 for (i = 1; i <= ins; ++i) {
1345 assert (block->in[i]);
1346 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1348 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1351 /* After collecting all predecessors into the array nin a new Phi node
1352 with these predecessors is created. This constructor contains an
1353 optimization: If all predecessors of the Phi node are identical it
1354 returns the only operand instead of a new Phi node. If the value
1355 passes two different control flow edges without being defined, and
1356 this is the second path treated, a pointer to the node that will be
1357 allocated for the first path (recursion) is returned. We already
1358 know the address of this node, as it is the next node to be allocated
1359 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1360 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1362 /* Now we know the value for "pos" and can enter it in the array with
1363 all known local variables. Attention: this might be a pointer to
1364 a node, that later will be allocated!!! See new_rd_Phi_in.
1365 If this is called in mature, after some set_value in the same block,
1366 the proper value must not be overwritten:
1368 get_value (makes Phi0, puts it into graph_arr)
1369 set_value (overwrites Phi0 in graph_arr)
1370 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the merged value when no later set_value has already
   stored a newer definition for this slot. */
1373 if (!block->attr.block.graph_arr[pos]) {
1374 block->attr.block.graph_arr[pos] = res;
1376 /* printf(" value already computed by %s\n",
1377 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1383 /* This function returns the last definition of a variable. In case
1384 this variable was last defined in a previous block, Phi nodes are
1385 inserted. If the part of the firm graph containing the definition
1386 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): damaged listing (fused line numbers, dropped interior lines,
   e.g. the return-type line of the definition below).  Comments only. */
1388 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1391 /* There are 4 cases to treat.
1393 1. The block is not mature and we visit it the first time. We can not
1394 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1395 predecessors is returned. This node is added to the linked list (field
1396 "link") of the containing block to be completed when this block is
1397 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1400 2. The value is already known in this block, graph_arr[pos] is set and we
1401 visit the block the first time. We can return the value without
1402 creating any new nodes.
1404 3. The block is mature and we visit it the first time. A Phi node needs
1405 to be created (phi_merge). If the Phi is not needed, as all it's
1406 operands are the same value reaching the block through different
1407 paths, it's optimized away and the value itself is returned.
1409 4. The block is mature, and we visit it the second time. Now two
1410 subcases are possible:
1411 * The value was computed completely the last time we were here. This
1412 is the case if there is no loop. We can return the proper value.
1413 * The recursion that visited this node and set the flag did not
1414 return yet. We are computing a value in a loop and need to
1415 break the recursion without knowing the result yet.
1416 @@@ strange case. Straight forward we would create a Phi before
1417 starting the computation of it's predecessors. In this case we will
1418 find a Phi here in any case. The problem is that this implementation
1419 only creates a Phi after computing the predecessors, so that it is
1420 hard to compute self references of this Phi. @@@
1421 There is no simple check for the second subcase. Therefore we check
1422 for a second visit and treat all such cases as the second subcase.
1423 Anyways, the basic situation is the same: we reached a block
1424 on two paths without finding a definition of the value: No Phi
1425 nodes are needed on both paths.
1426 We return this information "Two paths, no Phi needed" by a very tricky
1427 implementation that relies on the fact that an obstack is a stack and
1428 will return a node with the same address on different allocations.
1429 Look also at phi_merge and new_rd_phi_in to understand this.
1430 @@@ Unfortunately this does not work, see testprogram
1431 three_cfpred_example.
1435 /* case 4 -- already visited. */
1436 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1438 /* visited the first time */
1439 set_irn_visited(block, get_irg_visited(current_ir_graph));
1441 /* Get the local valid value */
1442 res = block->attr.block.graph_arr[pos];
1444 /* case 2 -- If the value is actually computed, return it. */
1445 if (res) return res;
1447 if (block->attr.block.matured) { /* case 3 */
1449 /* The Phi has the same amount of ins as the corresponding block. */
1450 int ins = get_irn_arity(block);
1452 NEW_ARR_A (ir_node *, nin, ins);
1454 /* Phi merge collects the predecessors and then creates a node. */
1455 res = phi_merge (block, pos, mode, nin, ins);
1457 } else { /* case 1 */
1458 /* The block is not mature, we don't know how many in's are needed. A Phi
1459 with zero predecessors is created. Such a Phi node is called Phi0
1460 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1461 to the list of Phi0 nodes in this block to be matured by mature_block
1463 The Phi0 has to remember the pos of it's internal value. If the real
1464 Phi is computed, pos is used to update the array with the local
1467 res = new_rd_Phi0 (current_ir_graph, block, mode);
1468 res->attr.phi0_pos = pos;
1469 res->link = block->link;
/* NOTE(review): the line linking this Phi0 into block->link appears to have
   been dropped from the listing here -- confirm against the original file. */
1473 /* If we get here, the frontend missed a use-before-definition error */
1476 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1477 assert (mode->code >= irm_F && mode->code <= irm_P);
1478 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1479 tarval_mode_null[mode->code]);
1482 /* The local valid value is available now. */
1483 block->attr.block.graph_arr[pos] = res;
1491 it starts the recursion. This causes an Id at the entry of
1492 every block that has no definition of the value! **/
1494 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack interface: this variant does not use an explicit Phi-in
   stack, the stubs visibly do nothing. */
1496 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1497 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in: allocate a Phi with the given predecessors and fold it to
   its single real operand when all non-Bad inputs agree.  `phi0` is the
   placeholder node this Phi will replace; self references through it are
   rewritten to the new node immediately. */
1500 static INLINE ir_node *
1501 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1502 ir_node **in, int ins, ir_node *phi0)
1505 ir_node *res, *known;
1507 /* Allocate a new node on the obstack. The allocation copies the in
1509 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1510 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1512 /* This loop checks whether the Phi has more than one predecessor.
1513 If so, it is a real Phi node and we break the loop. Else the
1514 Phi node merges the same definition on several paths and therefore
1515 is not needed. Don't consider Bad nodes! */
1517 for (i=0; i < ins; ++i)
1521 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1523 /* Optimize self referencing Phis: We can't detect them yet properly, as
1524 they still refer to the Phi0 they will replace. So replace right now. */
1525 if (phi0 && in[i] == phi0) in[i] = res;
1527 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1535 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The freshly allocated node is the topmost obstack object, so it can be
   freed again before a real value is returned instead. */
1538 obstack_free (current_ir_graph->obst, res);
1539 if (is_Phi(known)) {
1540 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1541 order, an enclosing Phi node may get superfluous. */
1542 res = optimize_in_place_2(known);
1543 if (res != known) { exchange(known, res); }
1548 /* A undefined value, e.g., in unreachable code. */
1552 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1553 IRN_VRFY_IRG(res, irg);
1554 /* Memory Phis in endless loops must be kept alive.
1555 As we can't distinguish these easily we keep all of them alive. */
1556 if ((res->op == op_Phi) && (mode == mode_M))
1557 add_End_keepalive(irg->end, res);
1564 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1566 #if PRECISE_EXC_CONTEXT
1568 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1570 /* Construct a new frag_array for node n.
1571 Copy the content from the current graph_arr of the corresponding block:
1572 this is the current state.
1573 Set ProjM(n) as current memory state.
1574 Further the last entry in frag_arr of current block points to n. This
1575 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1577 static INLINE ir_node ** new_frag_arr (ir_node *n)
/* Snapshot of the current block's value array, one entry per local slot. */
1582 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1583 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1584 sizeof(ir_node *)*current_ir_graph->n_loc);
1586 /* turn off optimization before allocating Proj nodes, as res isn't
1588 opt = get_opt_optimize(); set_optimize(0);
1589 /* Here we rely on the fact that all frag ops have Memory as first result! */
1590 if (get_irn_op(n) == op_Call)
1591 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* NOTE(review): the `else` branch introducing the assert/Proj below appears
   to have been dropped from this listing (gap 1591 -> 1593) -- confirm. */
1593 assert((pn_Quot_M == pn_DivMod_M) &&
1594 (pn_Quot_M == pn_Div_M) &&
1595 (pn_Quot_M == pn_Mod_M) &&
1596 (pn_Quot_M == pn_Load_M) &&
1597 (pn_Quot_M == pn_Store_M) &&
1598 (pn_Quot_M == pn_Alloc_M) );
1599 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* The last slot chains the block's fragile ops: block -> last_frag_op -> ... */
1603 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1607 static INLINE ir_node **
1608 get_frag_arr (ir_node *n) {
1609 if (get_irn_op(n) == op_Call) {
1610 return n->attr.call.frag_arr;
1611 } else if (get_irn_op(n) == op_Alloc) {
1612 return n->attr.a.frag_arr;
1614 return n->attr.frag_arr;
/* set_frag_value: enter `val` for slot `pos` into frag_arr if the slot is
   still empty, then propagate along the chain of frag arrays reachable
   through the last slot (see new_frag_arr).  Existing entries are never
   overwritten. */
1619 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
/* NOTE(review): two variants of this function (a recursive one and an
   iterative one bounded by 1000 steps) appear fused below -- the #if/#else
   lines separating them were dropped from this listing.  Confirm against
   the original file. */
1621 if (!frag_arr[pos]) frag_arr[pos] = val;
1622 if (frag_arr[current_ir_graph->n_loc - 1]) {
1623 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1624 assert(arr != frag_arr && "Endless recursion detected");
1625 set_frag_value(arr, pos, val);
/* Iterative variant: hard iteration bound instead of recursion. */
1630 for (i = 0; i < 1000; ++i) {
1631 if (!frag_arr[pos]) {
1632 frag_arr[pos] = val;
1634 if (frag_arr[current_ir_graph->n_loc - 1]) {
1635 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1641 assert(0 && "potential endless recursion");
/* get_r_frag_value_internal: look up the value for slot `pos` as it was
   valid directly after the fragile op `cfOp`; falls back to the normal
   get_r_value_internal / phi_merge machinery where needed. */
1646 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1650 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1652 frag_arr = get_frag_arr(cfOp);
1653 res = frag_arr[pos];
/* NOTE(review): a guard line (presumably `if (!res) {`) seems to have been
   dropped between 1653 and 1655 -- confirm against the original file. */
1655 if (block->attr.block.graph_arr[pos]) {
1656 /* There was a set_value after the cfOp and no get_value before that
1657 set_value. We must build a Phi node now. */
1658 if (block->attr.block.matured) {
1659 int ins = get_irn_arity(block);
1661 NEW_ARR_A (ir_node *, nin, ins);
1662 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 placeholder and queue it on block->link. */
1664 res = new_rd_Phi0 (current_ir_graph, block, mode);
1665 res->attr.phi0_pos = pos;
1666 res->link = block->link;
1670 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1671 but this should be better: (remove comment if this works) */
1672 /* It's a Phi, we can write this into all graph_arrs with NULL */
1673 set_frag_value(block->attr.block.graph_arr, pos, res);
1675 res = get_r_value_internal(block, pos, mode);
1676 set_frag_value(block->attr.block.graph_arr, pos, res);
1684 computes the predecessors for the real phi node, and then
1685 allocates and returns this node. The routine called to allocate the
1686 node might optimize it away and return a real value.
1687 This function must be called with an in-array of proper size. **/
/* NOTE(review): damaged listing -- fused line numbers, dropped interior
   lines (return type, braces, #else/#endif).  Comments only. */
1689 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1691 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1694 /* If this block has no value at pos create a Phi0 and remember it
1695 in graph_arr to break recursions.
1696 Else we may not set graph_arr as there a later value is remembered. */
1698 if (!block->attr.block.graph_arr[pos]) {
1699 if (block == get_irg_start_block(current_ir_graph)) {
1700 /* Collapsing to Bad tarvals is no good idea.
1701 So we call a user-supplied routine here that deals with this case as
1702 appropriate for the given language. Sadly the only help we can give
1703 here is the position.
1705 Even if all variables are defined before use, it can happen that
1706 we get to the start block, if a cond has been replaced by a tuple
1707 (bad, jmp). In this case we call the function needlessly, eventually
1708 generating a non-existent error.
1709 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1712 if (default_initialize_local_variable)
1713 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1715 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1716 /* We don't need to care about exception ops in the start block.
1717 There are none by definition. */
1718 return block->attr.block.graph_arr[pos];
/* Not the start block: place a Phi0 placeholder to break recursion. */
1720 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1721 block->attr.block.graph_arr[pos] = phi0;
1722 #if PRECISE_EXC_CONTEXT
1723 if (get_opt_precise_exc_context()) {
1724 /* Set graph_arr for fragile ops. Also here we should break recursion.
1725 We could choose a cyclic path through a cfop. But the recursion would
1726 break at some point. */
1727 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1733 /* This loop goes to all predecessor blocks of the block the Phi node
1734 is in and there finds the operands of the Phi node by calling
1735 get_r_value_internal. */
1736 for (i = 1; i <= ins; ++i) {
1737 prevCfOp = skip_Proj(block->in[i]);
1739 if (is_Bad(prevCfOp)) {
1740 /* In case a Cond has been optimized we would get right to the start block
1741 with an invalid definition. */
1742 nin[i-1] = new_Bad();
1745 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1747 if (!is_Bad(prevBlock)) {
1748 #if PRECISE_EXC_CONTEXT
1749 if (get_opt_precise_exc_context() &&
1750 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1751 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1752 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1755 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1757 nin[i-1] = new_Bad();
1761 /* We want to pass the Phi0 node to the constructor: this finds additional
1762 optimization possibilities.
1763 The Phi0 node either is allocated in this function, or it comes from
1764 a former call to get_r_value_internal. In this case we may not yet
1765 exchange phi0, as this is done in mature_block. */
1767 phi0_all = block->attr.block.graph_arr[pos];
/* Only pass the placeholder on if it really is a 0-arity Phi of this block. */
1768 if (!((get_irn_op(phi0_all) == op_Phi) &&
1769 (get_irn_arity(phi0_all) == 0) &&
1770 (get_nodes_block(phi0_all) == block)))
1776 /* After collecting all predecessors into the array nin a new Phi node
1777 with these predecessors is created. This constructor contains an
1778 optimization: If all predecessors of the Phi node are identical it
1779 returns the only operand instead of a new Phi node. */
1780 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1782 /* In case we allocated a Phi0 node at the beginning of this procedure,
1783 we need to exchange this Phi0 with the real Phi. */
1785 exchange(phi0, res);
1786 block->attr.block.graph_arr[pos] = res;
1787 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1788 only an optimization. */
1794 /* This function returns the last definition of a variable. In case
1795 this variable was last defined in a previous block, Phi nodes are
1796 inserted. If the part of the firm graph containing the definition
1797 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): second (PRECISE_EXC_CONTEXT / fast) variant of
   get_r_value_internal; damaged listing, comments only. */
1799 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1802 /* There are 4 cases to treat.
1804 1. The block is not mature and we visit it the first time. We can not
1805 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1806 predecessors is returned. This node is added to the linked list (field
1807 "link") of the containing block to be completed when this block is
1808 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1811 2. The value is already known in this block, graph_arr[pos] is set and we
1812 visit the block the first time. We can return the value without
1813 creating any new nodes.
1815 3. The block is mature and we visit it the first time. A Phi node needs
1816 to be created (phi_merge). If the Phi is not needed, as all it's
1817 operands are the same value reaching the block through different
1818 paths, it's optimized away and the value itself is returned.
1820 4. The block is mature, and we visit it the second time. Now two
1821 subcases are possible:
1822 * The value was computed completely the last time we were here. This
1823 is the case if there is no loop. We can return the proper value.
1824 * The recursion that visited this node and set the flag did not
1825 return yet. We are computing a value in a loop and need to
1826 break the recursion. This case only happens if we visited
1827 the same block with phi_merge before, which inserted a Phi0.
1828 So we return the Phi0.
1831 /* case 4 -- already visited. */
1832 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1833 /* As phi_merge allocates a Phi0 this value is always defined. Here
1834 is the critical difference of the two algorithms. */
1835 assert(block->attr.block.graph_arr[pos]);
1836 return block->attr.block.graph_arr[pos];
1839 /* visited the first time */
1840 set_irn_visited(block, get_irg_visited(current_ir_graph));
1842 /* Get the local valid value */
1843 res = block->attr.block.graph_arr[pos];
1845 /* case 2 -- If the value is actually computed, return it. */
1846 if (res) { return res; };
1848 if (block->attr.block.matured) { /* case 3 */
1850 /* The Phi has the same amount of ins as the corresponding block. */
1851 int ins = get_irn_arity(block);
1853 NEW_ARR_A (ir_node *, nin, ins);
1855 /* Phi merge collects the predecessors and then creates a node. */
1856 res = phi_merge (block, pos, mode, nin, ins);
1858 } else { /* case 1 */
1859 /* The block is not mature, we don't know how many in's are needed. A Phi
1860 with zero predecessors is created. Such a Phi node is called Phi0
1861 node. The Phi0 is then added to the list of Phi0 nodes in this block
1862 to be matured by mature_block later.
1863 The Phi0 has to remember the pos of it's internal value. If the real
1864 Phi is computed, pos is used to update the array with the local
1866 res = new_rd_Phi0 (current_ir_graph, block, mode);
1867 res->attr.phi0_pos = pos;
1868 res->link = block->link;
1872 /* If we get here, the frontend missed a use-before-definition error */
1875 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1876 assert (mode->code >= irm_F && mode->code <= irm_P);
1877 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1878 get_mode_null(mode));
1881 /* The local valid value is available now. */
1882 block->attr.block.graph_arr[pos] = res;
1887 #endif /* USE_FAST_PHI_CONSTRUCTION */
1889 /* ************************************************************************** */
1891 /** Finalize a Block node, when all control flows are known. */
1892 /** Acceptable parameters are only Block nodes. */
/* mature_block: once all CF predecessors are known, upgrade every queued
   Phi0 on block->link to a real Phi via phi_merge, mark the block matured,
   and optimize it in place. */
1894 mature_block (ir_node *block)
1901 assert (get_irn_opcode(block) == iro_Block);
1902 /* @@@ should be commented in
1903 assert (!get_Block_matured(block) && "Block already matured"); */
1905 if (!get_Block_matured(block)) {
/* in[0] is the block itself, hence the -1 for the CF-predecessor count. */
1906 ins = ARR_LEN (block->in)-1;
1907 /* Fix block parameters */
1908 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1910 /* An array for building the Phi nodes. */
1911 NEW_ARR_A (ir_node *, nin, ins);
1913 /* Traverse a chain of Phi nodes attached to this block and mature
/* NOTE(review): the statement saving n's successor into `next` appears to
   have been dropped from this listing (gap 1916 -> 1918) -- confirm. */
1915 for (n = block->link; n; n=next) {
1916 inc_irg_visited(current_ir_graph);
1918 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1921 block->attr.block.matured = 1;
1923 /* Now, as the block is a finished firm node, we can optimize it.
1924 Since other nodes have been allocated since the block was created
1925 we can not free the node on the obstack. Therefore we have to call
1927 Unfortunately the optimization does not change a lot, as all allocated
1928 nodes refer to the unoptimized node.
1929 We can call _2, as global cse has no effect on blocks. */
1930 block = optimize_in_place_2(block);
1931 IRN_VRFY_IRG(block, current_ir_graph);
/* NOTE(review): from here on the listing contains the "comfortable"
   new_d_* interface: thin wrappers forwarding to the new_rd_* constructors
   with current_ir_graph and its current block.  Return-type lines, braces
   and argument continuation lines were dropped from this listing; only
   comments are added below. */
1936 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1938 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
/* Note: Const nodes are placed in the start block, not the current block. */
1943 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1945 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1950 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1952 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1958 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1960 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1965 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1967 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* defaultProj: marks the Cond as fragmentary and creates its default target. */
1972 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1975 assert(arg->op == op_Cond);
1976 arg->attr.c.kind = fragmentary;
1977 arg->attr.c.default_proj = max_proj;
1978 res = new_Proj (arg, mode_X, max_proj);
1983 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1985 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1990 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1992 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1996 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1998 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2003 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2005 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2010 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2012 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2018 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2020 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2025 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2027 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2032 * allocate the frag array
2034 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2035 if (get_opt_precise_exc_context()) {
2036 if ((current_ir_graph->phase_state == phase_building) &&
2037 (get_irn_op(res) == op) && /* Could be optimized away. */
2038 !*frag_store) /* Could be a cse where the arr is already set. */ {
2039 *frag_store = new_frag_arr(res);
/* NOTE(review): new_d_* wrappers for fragile ops additionally allocate a
   frag array when PRECISE_EXC_CONTEXT is set.  Closing braces, #endif and
   return statements were dropped from this listing; comments only. */
2046 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2049 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2051 #if PRECISE_EXC_CONTEXT
2052 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2059 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2062 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2064 #if PRECISE_EXC_CONTEXT
2065 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2072 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2075 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2077 #if PRECISE_EXC_CONTEXT
2078 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2085 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2088 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2090 #if PRECISE_EXC_CONTEXT
2091 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
2098 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2100 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2105 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2107 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2112 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2114 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2119 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2121 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2126 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2128 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2133 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2135 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2140 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2142 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2147 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2149 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2154 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2156 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2161 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2163 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2168 new_d_Jmp (dbg_info* db)
2170 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2174 new_d_Cond (dbg_info* db, ir_node *c)
2176 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2180 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2184 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2185 store, callee, arity, in, tp);
2186 #if PRECISE_EXC_CONTEXT
2187 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
/* NOTE(review): continuation of the new_d_* wrapper section; the damaged
   listing dropped braces, #endif and continuation lines.  Comments only. */
2194 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2196 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2201 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2203 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2208 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2211 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2213 #if PRECISE_EXC_CONTEXT
2214 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2221 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2224 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2226 #if PRECISE_EXC_CONTEXT
2227 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2234 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2238 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2239 store, size, alloc_type, where);
2240 #if PRECISE_EXC_CONTEXT
2241 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2248 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2250 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2251 store, ptr, size, free_type);
/* simpleSel: entity selection without index operands (0, NULL). */
2255 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2256 /* GL: objptr was called frame before. Frame was a bad choice for the name
2257 as the operand could as well be a pointer to a dynamic object. */
2259 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2260 store, objptr, 0, NULL, ent);
2264 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2266 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2267 store, objptr, n_index, index, sel);
2271 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2273 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2274 store, objptr, ent));
/* Like Const, SymConst nodes are placed in the start block. */
2278 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2280 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2285 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2287 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2292 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2294 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2302 return __new_d_Bad();
2306 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2308 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2313 new_d_Unknown (ir_mode *m)
2315 return new_rd_Unknown(current_ir_graph, m);
2319 new_d_CallBegin (dbg_info *db, ir_node *call)
2322 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2327 new_d_EndReg (dbg_info *db)
2330 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2335 new_d_EndExcept (dbg_info *db)
2338 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2343 new_d_Break (dbg_info *db)
2345 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2349 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2351 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2356 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2360 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2361 callee, arity, in, tp);
2366 /* ********************************************************************* */
2367 /* Comfortable interface with automatic Phi node construction. */
2368 /* (Uses also constructors of ?? interface, except new_Block. */
2369 /* ********************************************************************* */
2371 /* * Block construction **/
2372 /* immature Block without predecessors */
/* Creates an immature block (CF predecessors added later via add_in_edge),
   makes it the current block, and sets up its per-slot value array for the
   automatic Phi construction. */
2373 ir_node *new_d_immBlock (dbg_info* db) {
2376 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2377 /* creates a new dynamic in-array as length of in is -1 */
2378 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2379 current_ir_graph->current_block = res;
2380 res->attr.block.matured = 0;
2381 /* res->attr.block.exc = exc_normal; */
2382 /* res->attr.block.handler_entry = 0; */
2383 res->attr.block.irg = current_ir_graph;
2384 res->attr.block.backedge = NULL;
2385 res->attr.block.in_cg = NULL;
2386 res->attr.block.cg_backedge = NULL;
2387 set_Block_block_visited(res, 0);
2389 /* Create and initialize array for Phi-node construction. */
2390 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2391 current_ir_graph->n_loc);
2392 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2394 /* Immature block may not be optimized! */
2395 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper without debug info. */
2401 new_immBlock (void) {
2402 return new_d_immBlock(NULL);
2405 /* add an edge to a jmp/control flow node */
/* add_in_edge: append control-flow predecessor `jmp` to an immature block.
   Matured blocks must not be extended. */
2407 add_in_edge (ir_node *block, ir_node *jmp)
2409 if (block->attr.block.matured) {
2410 assert(0 && "Error: Block already matured!\n");
2413 assert(jmp != NULL);
2414 ARR_APP1(ir_node *, block->in, jmp);
2418 /* changing the current block */
/* switch_block: make `target` the block new nodes are placed into. */
2420 switch_block (ir_node *target)
2422 current_ir_graph->current_block = target;
2425 /* ************************ */
2426 /* parameter administration */
/* Slot 0 of graph_arr holds the memory/store state (see set_store below);
   user-visible locals therefore live at index pos + 1. */
2428 /* get a value from the parameter array from the current block by its index */
2430 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2432 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2433 inc_irg_visited(current_ir_graph);
2435 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2437 /* get a value from the parameter array from the current block by its index */
2439 get_value (int pos, ir_mode *mode)
2441 return get_d_value(NULL, pos, mode);
2444 /* set a value at position pos in the parameter array from the current block */
2446 set_value (int pos, ir_node *value)
2448 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2449 assert(pos+1 < current_ir_graph->n_loc);
2450 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2453 /* get the current store */
/* NOTE(review): the declaration line of get_store was dropped from this
   listing (gap 2453 -> 2457). */
2457 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2458 /* GL: one could call get_value instead */
2459 inc_irg_visited(current_ir_graph);
2460 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2463 /* set the current store */
2465 set_store (ir_node *store)
2467 /* GL: one could call set_value instead */
2468 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2469 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* keep_alive: attach `ka` to the End node so later passes do not remove it. */
2473 keep_alive (ir_node *ka)
2475 add_End_keepalive(current_ir_graph->end, ka);
2478 /** Useful access routines **/
2479 /* Returns the current block of the current graph. To set the current
2480 block use switch_block(). */
2481 ir_node *get_cur_block() {
2482 return get_irg_current_block(current_ir_graph);
2485 /* Returns the frame type of the current graph */
2486 type *get_cur_frame_type() {
2487 return get_irg_frame_type(current_ir_graph);
2491 /* ********************************************************************* */
2494 /* call once for each run of the library */
2496 init_cons (default_initialize_local_variable_func_t *func)
2498 default_initialize_local_variable = func;
2501 /* call for each graph */
2503 finalize_cons (ir_graph *irg) {
2504 irg->phase_state = phase_high;
2508 ir_node *new_Block(int arity, ir_node **in) {
2509 return new_d_Block(NULL, arity, in);
2511 ir_node *new_Start (void) {
2512 return new_d_Start(NULL);
2514 ir_node *new_End (void) {
2515 return new_d_End(NULL);
2517 ir_node *new_Jmp (void) {
2518 return new_d_Jmp(NULL);
2520 ir_node *new_Cond (ir_node *c) {
2521 return new_d_Cond(NULL, c);
2523 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2524 return new_d_Return(NULL, store, arity, in);
2526 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2527 return new_d_Raise(NULL, store, obj);
2529 ir_node *new_Const (ir_mode *mode, tarval *con) {
2530 return new_d_Const(NULL, mode, con);
2532 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2533 return new_d_SymConst(NULL, value, kind);
2535 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2536 return new_d_simpleSel(NULL, store, objptr, ent);
2538 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2540 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2542 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2543 return new_d_InstOf (NULL, store, objptr, ent);
2545 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2547 return new_d_Call(NULL, store, callee, arity, in, tp);
2549 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2550 return new_d_Add(NULL, op1, op2, mode);
2552 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2553 return new_d_Sub(NULL, op1, op2, mode);
2555 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2556 return new_d_Minus(NULL, op, mode);
2558 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2559 return new_d_Mul(NULL, op1, op2, mode);
2561 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2562 return new_d_Quot(NULL, memop, op1, op2);
2564 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2565 return new_d_DivMod(NULL, memop, op1, op2);
2567 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2568 return new_d_Div(NULL, memop, op1, op2);
2570 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2571 return new_d_Mod(NULL, memop, op1, op2);
2573 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2574 return new_d_Abs(NULL, op, mode);
2576 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2577 return new_d_And(NULL, op1, op2, mode);
2579 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2580 return new_d_Or(NULL, op1, op2, mode);
2582 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2583 return new_d_Eor(NULL, op1, op2, mode);
2585 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2586 return new_d_Not(NULL, op, mode);
2588 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2589 return new_d_Shl(NULL, op, k, mode);
2591 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2592 return new_d_Shr(NULL, op, k, mode);
2594 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2595 return new_d_Shrs(NULL, op, k, mode);
2597 #define new_Rotate new_Rot
2598 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2599 return new_d_Rot(NULL, op, k, mode);
2601 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2602 return new_d_Cmp(NULL, op1, op2);
2604 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2605 return new_d_Conv(NULL, op, mode);
2607 ir_node *new_Cast (ir_node *op, type *to_tp) {
2608 return new_d_Cast(NULL, op, to_tp);
2610 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2611 return new_d_Phi(NULL, arity, in, mode);
2613 ir_node *new_Load (ir_node *store, ir_node *addr) {
2614 return new_d_Load(NULL, store, addr);
2616 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2617 return new_d_Store(NULL, store, addr, val);
2619 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2620 where_alloc where) {
2621 return new_d_Alloc(NULL, store, size, alloc_type, where);
2623 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2625 return new_d_Free(NULL, store, ptr, size, free_type);
2627 ir_node *new_Sync (int arity, ir_node **in) {
2628 return new_d_Sync(NULL, arity, in);
2630 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2631 return new_d_Proj(NULL, arg, mode, proj);
2633 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2634 return new_d_defaultProj(NULL, arg, max_proj);
2636 ir_node *new_Tuple (int arity, ir_node **in) {
2637 return new_d_Tuple(NULL, arity, in);
2639 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2640 return new_d_Id(NULL, val, mode);
2642 ir_node *new_Bad (void) {
2645 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2646 return new_d_Confirm (NULL, val, bound, cmp);
2648 ir_node *new_Unknown(ir_mode *m) {
2649 return new_d_Unknown(m);
2651 ir_node *new_CallBegin (ir_node *callee) {
2652 return new_d_CallBegin(NULL, callee);
2654 ir_node *new_EndReg (void) {
2655 return new_d_EndReg(NULL);
2657 ir_node *new_EndExcept (void) {
2658 return new_d_EndExcept(NULL);
2660 ir_node *new_Break (void) {
2661 return new_d_Break(NULL);
2663 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2664 return new_d_Filter(NULL, arg, mode, proj);
2666 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2667 return new_d_FuncCall(NULL, callee, arity, in, tp);