3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
/* Opaque here on purpose: the struct body lives in irgraph.c (see above). */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
/* Node-verification hook: expands to nothing in the non-verifying build,
   forwards to irn_vrfy_irg() otherwise.  The conditional (#else) between
   the two definitions is not visible in this excerpt. */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependent initialization variable
/* Optional front-end callback that supplies a default value for
   uninitialized local variables; NULL means no default is provided. */
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
/* db:    debug info to attach (may be NULL)
   irg:   graph the Block belongs to
   arity: number of control-flow predecessors
   in:    predecessor array of length arity */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A fixed-arity block is mature immediately and starts unvisited. */
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
/* Backedge flags start all-false; interprocedural (call-graph)
   predecessor info is only filled in later, if at all. */
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T result tuple, no predecessors). */
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
/* Constructs an End node.  Arity -1 makes the in-array dynamic so
   keep-alive edges can be appended later (see add_End_keepalive). */
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its arity must match the Phi's. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown nodes (loop body elided in this excerpt;
   presumably it sets has_unknown -- confirm against the full source). */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
/* Phis with Unknown operands are not optimized: folding could lose them. */
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* Constructs a Const from tarval con with explicit type tp.
   NOTE: the block parameter is ignored -- constants are always placed
   in the start block (see irg->start_block below). */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: derives the type from the tarval.  Entity-valued
   tarvals get a pointer type to the entity's type, everything else
   gets unknown_type. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 if (tarval_is_entity(con))
159 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
161 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node (transparent copy of val). */
165 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
169 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
170 res = optimize_node(res);
171 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj selecting result 'proj' from tuple node arg. */
176 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
181 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
182 res->attr.proj = proj;
/* Sanity: the predecessor must exist and live in a block. */
185 assert(get_Proj_pred(res));
186 assert(get_nodes_Block(get_Proj_pred(res)));
188 res = optimize_node(res);
190 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond and marks the Cond as
   'fragmentary' with max_proj as its default projection number. */
196 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
200 assert(arg->op == op_Cond);
201 arg->attr.c.kind = fragmentary;
202 arg->attr.c.default_proj = max_proj;
203 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv (mode conversion) of op to mode. */
208 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
212 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
213 res = optimize_node(res);
214 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast: changes only the type label, keeps op's mode. */
219 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
223 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
224 res->attr.cast.totype = to_tp;
225 res = optimize_node(res);
226 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple of arity values (mode_T). */
231 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
235 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
236 res = optimize_node (res);
237 IRN_VRFY_IRG(res, irg);
/* --- Arithmetic / logic / shift / compare constructors ---------------
   All follow the same pattern: pack the operands into a local in[]
   array (those assignment lines are elided in this excerpt), allocate
   the node, run the local optimizer, verify.  Quot/DivMod/Div/Mod take
   an explicit memory operand and yield a mode_T result tuple. */
242 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
243 ir_node *op1, ir_node *op2, ir_mode *mode)
250 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
251 res = optimize_node(res);
252 IRN_VRFY_IRG(res, irg);
257 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
258 ir_node *op1, ir_node *op2, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
266 res = optimize_node (res);
267 IRN_VRFY_IRG(res, irg);
272 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op, ir_mode *mode)
277 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
278 res = optimize_node(res);
279 IRN_VRFY_IRG(res, irg);
284 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
285 ir_node *op1, ir_node *op2, ir_mode *mode)
292 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
293 res = optimize_node(res);
294 IRN_VRFY_IRG(res, irg);
/* Division family: in[] holds { memop, op1, op2 }; result is a tuple. */
299 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
300 ir_node *memop, ir_node *op1, ir_node *op2)
308 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
309 res = optimize_node(res);
310 IRN_VRFY_IRG(res, irg);
315 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
325 res = optimize_node(res);
326 IRN_VRFY_IRG(res, irg);
331 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
340 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
347 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *memop, ir_node *op1, ir_node *op2)
356 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);
363 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
364 ir_node *op1, ir_node *op2, ir_mode *mode)
371 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
372 res = optimize_node(res);
373 IRN_VRFY_IRG(res, irg);
378 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
379 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
387 res = optimize_node(res);
388 IRN_VRFY_IRG(res, irg);
393 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op1, ir_node *op2, ir_mode *mode)
401 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
402 res = optimize_node (res);
403 IRN_VRFY_IRG(res, irg);
408 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
409 ir_node *op, ir_mode *mode)
413 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
414 res = optimize_node(res);
415 IRN_VRFY_IRG(res, irg);
/* Shift/rotate family: in[] holds { op, k } where k is the amount. */
420 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
421 ir_node *op, ir_node *k, ir_mode *mode)
428 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
429 res = optimize_node(res);
430 IRN_VRFY_IRG(res, irg);
435 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
436 ir_node *op, ir_node *k, ir_mode *mode)
443 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
444 res = optimize_node(res);
445 IRN_VRFY_IRG(res, irg);
450 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
451 ir_node *op, ir_node *k, ir_mode *mode)
458 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
459 res = optimize_node(res);
460 IRN_VRFY_IRG(res, irg);
465 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
466 ir_node *op, ir_node *k, ir_mode *mode)
473 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
474 res = optimize_node(res);
475 IRN_VRFY_IRG(res, irg);
480 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
481 ir_node *op, ir_mode *mode)
485 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
486 res = optimize_node (res);
487 IRN_VRFY_IRG(res, irg);
/* Cmp yields a mode_T tuple; the relation is selected by a Proj. */
492 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
493 ir_node *op1, ir_node *op2)
500 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
501 res = optimize_node(res);
502 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X control flow). */
507 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
511 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
512 res = optimize_node (res);
513 IRN_VRFY_IRG (res, irg);
/* Constructs a Cond on selector c; starts as a dense switch with
   default projection 0 (new_rd_defaultProj may change this later). */
518 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
522 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
523 res->attr.c.kind = dense;
524 res->attr.c.default_proj = 0;
525 res = optimize_node (res);
526 IRN_VRFY_IRG(res, irg);
/* Constructs a Call: r_in = { store, callee, arg0..argN-1 }.  The
   lines assigning r_in[0]/r_in[1] are elided in this excerpt. */
531 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
532 ir_node *callee, int arity, ir_node **in, type *tp)
539 NEW_ARR_A(ir_node *, r_in, r_arity);
542 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
544 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
546 assert(is_method_type(tp));
547 set_Call_type(res, tp);
548 res->attr.call.callee_arr = NULL;
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
/* Constructs a Return: r_in = { store, res0..resN-1 }. */
555 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
556 ir_node *store, int arity, ir_node **in)
563 NEW_ARR_A (ir_node *, r_in, r_arity);
565 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
566 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
/* Constructs a Raise (exception throw) on { store, obj }. */
573 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
580 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
581 res = optimize_node(res);
582 IRN_VRFY_IRG(res, irg);
/* Constructs a Load from address adr with memory operand store;
   in[] = { store, adr } (assignment lines elided in this excerpt). */
587 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
588 ir_node *store, ir_node *adr)
595 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
/* Constructs a Store of val to adr; in[] = { store, adr, val }. */
602 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
603 ir_node *store, ir_node *adr, ir_node *val)
611 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of 'size' bytes/elements of alloc_type;
   'where' selects stack vs. heap allocation. */
618 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *size, type *alloc_type, where_alloc where)
626 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
627 res->attr.a.where = where;
628 res->attr.a.type = alloc_type;
629 res = optimize_node(res);
630 IRN_VRFY_IRG(res, irg);
/* Constructs a Free of ptr (size elements of free_type). */
635 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
636 ir_node *ptr, ir_node *size, type *free_type)
644 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
645 res->attr.f = free_type;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel (entity/array element selection):
   r_in = { store, objptr, index0..indexN-1 }, result is a machine
   pointer (mode_P_mach). */
652 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
653 int arity, ir_node **in, entity *ent)
659 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
662 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
665 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
666 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
667 res->attr.s.ent = ent;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf test of objptr against type ent.
   NOTE(review): this builds an op_Sel node, not a dedicated InstOf
   opcode -- looks like legacy behavior; confirm against ircons.h.
   Also note it is deliberately NOT passed through optimize (see below). */
674 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
675 ir_node *objptr, type *ent)
682 NEW_ARR_A(ir_node *, r_in, r_arity);
686 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
687 res->attr.io.ent = ent;
689 /* res = optimize(res); */
690 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst with explicit type tp.  Address kinds
   (symconst_addr_name / symconst_addr_ent) get a pointer mode; the
   mode selection for the other kinds is elided in this excerpt. */
695 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
696 symconst_kind symkind, type *tp)
701 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
705 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
707 res->attr.i.num = symkind;
708 res->attr.i.sym = value;
711 res = optimize_node(res);
712 IRN_VRFY_IRG(res, irg);
/* Wrapper: SymConst with unknown_type. */
717 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind)
720 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
/* Convenience constructors for the four SymConst kinds.  All place the
   node in the start block.  The (type *) casts below only populate the
   first union member of symconst_symbol. */
724 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
725 symconst_symbol sym = {(type *)symbol};
726 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
729 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
730 symconst_symbol sym = {(type *)symbol};
731 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
734 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
735 symconst_symbol sym = {symbol};
736 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
739 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
740 symconst_symbol sym = {symbol};
741 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Constructs a Sync joining 'arity' memory values into one (mode_M). */
745 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
749 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
/* Returns the graph's unique Bad node (body elided in this excerpt). */
756 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts that 'val' relates to 'bound' by cmp.
   in[] = { val, bound } (assignment lines elided). */
762 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
764 ir_node *in[2], *res;
768 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
769 res->attr.confirm_cmp = cmp;
770 res = optimize_node (res);
771 IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown of mode m, always placed in the start block. */
776 new_rd_Unknown (ir_graph *irg, ir_mode *m)
778 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin (interprocedural view) for 'call'; its single
   predecessor is the call's function pointer. */
782 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
787 in[0] = get_Call_ptr(call);
788 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
789 /* res->attr.callbegin.irg = irg; */
790 res->attr.callbegin.call = call;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
/* Constructs an EndReg (interprocedural view end of regular control
   flow).  Arity -1 = dynamic in-array. */
797 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
801 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
803 IRN_VRFY_IRG(res, irg);
/* Constructs an EndExcept (end of exceptional control flow) and
   registers it as the graph's exception end. */
808 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
812 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
813 irg->end_except = res;
814 IRN_VRFY_IRG (res, irg);
/* Constructs a Break (interprocedural jump out of a called graph). */
819 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
823 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter: the interprocedural analogue of Proj. */
830 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
835 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
836 res->attr.filter.proj = proj;
837 res->attr.filter.in_cg = NULL;
838 res->attr.filter.backedge = NULL;
841 assert(get_Proj_pred(res));
842 assert(get_nodes_Block(get_Proj_pred(res)));
844 res = optimize_node(res);
845 IRN_VRFY_IRG(res, irg);
/* Constructs a FuncCall (call without a memory operand):
   r_in = { callee, arg0..argN-1 }, hence the copy starts at r_in[1]. */
851 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
852 ir_node *callee, int arity, ir_node **in, type *tp)
859 NEW_ARR_A(ir_node *, r_in, r_arity);
861 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
863 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
865 assert(is_method_type(tp));
866 set_FuncCall_type(res, tp);
867 res->attr.call.callee_arr = NULL;
868 res = optimize_node(res);
869 IRN_VRFY_IRG(res, irg);
/* --- new_r_* wrappers ------------------------------------------------
   Thin forwarders to the corresponding new_rd_* constructors with a
   NULL dbg_info.  They add no logic of their own. */
874 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
875 return new_rd_Block(NULL, irg, arity, in);
877 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
878 return new_rd_Start(NULL, irg, block);
880 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
881 return new_rd_End(NULL, irg, block);
883 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
884 return new_rd_Jmp(NULL, irg, block);
886 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
887 return new_rd_Cond(NULL, irg, block, c);
889 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
890 ir_node *store, int arity, ir_node **in) {
891 return new_rd_Return(NULL, irg, block, store, arity, in);
893 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
894 ir_node *store, ir_node *obj) {
895 return new_rd_Raise(NULL, irg, block, store, obj);
897 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
898 ir_mode *mode, tarval *con) {
899 return new_rd_Const(NULL, irg, block, mode, con);
901 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
902 symconst_symbol value, symconst_kind symkind) {
903 return new_rd_SymConst(NULL, irg, block, value, symkind);
905 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
906 ir_node *objptr, int n_index, ir_node **index,
908 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
910 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
912 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
914 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
915 ir_node *callee, int arity, ir_node **in,
917 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
919 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
920 ir_node *op1, ir_node *op2, ir_mode *mode) {
921 return new_rd_Add(NULL, irg, block, op1, op2, mode);
923 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
924 ir_node *op1, ir_node *op2, ir_mode *mode) {
925 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
927 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
928 ir_node *op, ir_mode *mode) {
929 return new_rd_Minus(NULL, irg, block, op, mode);
931 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
932 ir_node *op1, ir_node *op2, ir_mode *mode) {
933 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
935 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
936 ir_node *memop, ir_node *op1, ir_node *op2) {
937 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
939 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
940 ir_node *memop, ir_node *op1, ir_node *op2) {
941 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
943 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
944 ir_node *memop, ir_node *op1, ir_node *op2) {
945 return new_rd_Div(NULL, irg, block, memop, op1, op2);
947 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
948 ir_node *memop, ir_node *op1, ir_node *op2) {
949 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
951 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
952 ir_node *op, ir_mode *mode) {
953 return new_rd_Abs(NULL, irg, block, op, mode);
955 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
956 ir_node *op1, ir_node *op2, ir_mode *mode) {
957 return new_rd_And(NULL, irg, block, op1, op2, mode);
959 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
960 ir_node *op1, ir_node *op2, ir_mode *mode) {
961 return new_rd_Or(NULL, irg, block, op1, op2, mode);
963 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
964 ir_node *op1, ir_node *op2, ir_mode *mode) {
965 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
967 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
968 ir_node *op, ir_mode *mode) {
969 return new_rd_Not(NULL, irg, block, op, mode);
971 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
972 ir_node *op1, ir_node *op2) {
973 return new_rd_Cmp(NULL, irg, block, op1, op2);
975 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
976 ir_node *op, ir_node *k, ir_mode *mode) {
977 return new_rd_Shl(NULL, irg, block, op, k, mode);
979 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
980 ir_node *op, ir_node *k, ir_mode *mode) {
981 return new_rd_Shr(NULL, irg, block, op, k, mode);
983 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
984 ir_node *op, ir_node *k, ir_mode *mode) {
985 return new_rd_Shrs(NULL, irg, block, op, k, mode);
987 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
988 ir_node *op, ir_node *k, ir_mode *mode) {
989 return new_rd_Rot(NULL, irg, block, op, k, mode);
991 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
992 ir_node *op, ir_mode *mode) {
993 return new_rd_Conv(NULL, irg, block, op, mode);
995 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
996 return new_rd_Cast(NULL, irg, block, op, to_tp);
998 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
999 ir_node **in, ir_mode *mode) {
1000 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1002 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1003 ir_node *store, ir_node *adr) {
1004 return new_rd_Load(NULL, irg, block, store, adr);
1006 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1007 ir_node *store, ir_node *adr, ir_node *val) {
1008 return new_rd_Store(NULL, irg, block, store, adr, val);
1010 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1011 ir_node *size, type *alloc_type, where_alloc where) {
1012 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1014 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1015 ir_node *ptr, ir_node *size, type *free_type) {
1016 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1018 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1019 return new_rd_Sync(NULL, irg, block, arity, in);
1021 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1022 ir_mode *mode, long proj) {
1023 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1025 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1027 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1029 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1030 int arity, ir_node **in) {
1031 return new_rd_Tuple(NULL, irg, block, arity, in );
1033 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1034 ir_node *val, ir_mode *mode) {
1035 return new_rd_Id(NULL, irg, block, val, mode);
1037 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1038 return new_rd_Bad(irg);
1040 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1041 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1043 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1044 return new_rd_Unknown(irg, m);
1046 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1047 return new_rd_CallBegin(NULL, irg, block, callee);
1049 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1050 return new_rd_EndReg(NULL, irg, block);
1052 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1053 return new_rd_EndExcept(NULL, irg, block);
1055 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1056 return new_rd_Break(NULL, irg, block);
1058 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1059 ir_mode *mode, long proj) {
1060 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1062 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1063 ir_node *callee, int arity, ir_node **in,
1065 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1069 /** ********************/
1070 /** public interfaces */
1071 /** construction tools */
1075 * - create a new Start node in the current block
1077 * @return s - pointer to the created Start node
/* Constructs a Start node in current_ir_graph's current block.
   Unlike the new_rd_* family this relies on global construction state. */
1082 new_d_Start (dbg_info* db)
1086 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1087 op_Start, mode_T, 0, NULL);
1088 /* res->attr.start.irg = current_ir_graph; */
1090 res = optimize_node(res);
1091 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node in the current block (dynamic in-array). */
1096 new_d_End (dbg_info* db)
1099 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1100 op_End, mode_X, -1, NULL);
1101 res = optimize_node(res);
1102 IRN_VRFY_IRG(res, current_ir_graph);
1107 /* Constructs a Block with a fixed number of predecessors.
1108 Does set current_block. Can be used with automatic Phi
1109 node construction. */
1111 new_d_Block (dbg_info* db, int arity, ir_node **in)
1115 bool has_unknown = false;
1117 res = new_rd_Block(db, current_ir_graph, arity, in);
1119 /* Create and initialize array for Phi-node construction. */
1120 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr[pos] caches the current value of local variable 'pos'
   in this block; start with all slots undefined (NULL). */
1121 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1122 current_ir_graph->n_loc);
1123 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes (loop body elided in this
   excerpt; presumably it sets has_unknown -- confirm in full source). */
1126 for (i = arity-1; i >= 0; i--)
1127 if (get_irn_op(in[i]) == op_Unknown) {
/* Blocks with Unknown predecessors are left unoptimized. */
1132 if (!has_unknown) res = optimize_node(res);
1133 current_ir_graph->current_block = res;
1135 IRN_VRFY_IRG(res, current_ir_graph);
1140 /* ***********************************************************************/
1141 /* Methods necessary for automatic Phi node creation */
1143 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1144 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1145 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1146 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1148 Call Graph: ( A ---> B == A "calls" B)
1150 get_value mature_block
1158 get_r_value_internal |
1162 new_rd_Phi0 new_rd_Phi_in
1164 * *************************************************************************** */
1166 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi for a value referenced in an immature block; it is
   upgraded to a real Phi when the block matures.  Not optimized. */
1167 static INLINE ir_node *
1168 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1172 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1173 IRN_VRFY_IRG(res, irg);
1177 /* There are two implementations of the Phi node construction. The first
1178 is faster, but does not work for blocks with more than 2 predecessors.
1179 The second works always but is slower and causes more unnecessary Phi
1181 Select the implementations by the following preprocessor flag set in
1183 #if USE_FAST_PHI_CONSTRUCTION
1185 /* This is a stack used for allocating and deallocating nodes in
1186 new_rd_Phi_in. The original implementation used the obstack
1187 to model this stack, now it is explicit. This reduces side effects.
1189 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack (malloc'd; growable array). */
1190 INLINE Phi_in_stack *
1191 new_Phi_in_stack(void) {
1194 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1196 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's array (the struct free() is elided here). */
1203 free_Phi_in_stack(Phi_in_stack *s) {
1204 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the reuse stack: append when the
   array is full up to pos, otherwise overwrite the slot at pos. */
1208 free_to_Phi_in_stack(ir_node *phi) {
1209 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1210 current_ir_graph->Phi_in_stack->pos)
1211 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1213 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1215 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: pops one from the reuse stack when available,
   otherwise allocates a fresh one. */
1218 static INLINE ir_node *
1219 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1220 int arity, ir_node **in) {
1222 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1223 int pos = current_ir_graph->Phi_in_stack->pos;
1227 /* We need to allocate a new node */
/* NOTE(review): 'db' is not among this function's visible parameters;
   it looks like this should be NULL (or a db parameter is elided from
   this excerpt) -- confirm against the full source. */
1228 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1229 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1231 /* reuse the old node and initialize it again. */
1234 assert (res->kind == k_ir_node);
1235 assert (res->op == op_Phi);
1239 assert (arity >= 0);
1240 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer, hence arity+1 slots. */
1241 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1243 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1245 (current_ir_graph->Phi_in_stack->pos)--;
1249 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1251 /* Creates a Phi node with a given, fixed array **in of predecessors.
1252 If the Phi node is unnecessary, as the same value reaches the block
1253 through all control flow paths, it is eliminated and the value
1254 returned directly. This constructor is only intended for use in
1255 the automatic Phi node generation triggered by get_value or mature.
1256 The implementation is quite tricky and depends on the fact, that
1257 the nodes are allocated on a stack:
1258 The in array contains predecessors and NULLs. The NULLs appear,
1259 if get_r_value_internal, that computed the predecessors, reached
1260 the same block on two paths. In this case the same value reaches
1261 this block on both paths, there is no definition in between. We need
1262 not allocate a Phi where these path's merge, but we have to communicate
1263 this fact to the caller. This happens by returning a pointer to the
1264 node the caller _will_ allocate. (Yes, we predict the address. We can
1265 do so because the nodes are allocated on the obstack.) The caller then
1266 finds a pointer to itself and, when this routine is called again,
1269 static INLINE ir_node *
1270 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1273 ir_node *res, *known;
1275 /* Allocate a new node on the obstack. This can return a node to
1276 which some of the pointers in the in-array already point.
1277 Attention: the constructor copies the in array, i.e., the later
1278 changes to the array in this routine do not affect the
1279 constructed node! If the in array contains NULLs, there will be
1280 missing predecessors in the returned node. Is this a possible
1281 internal state of the Phi node generation? */
1282 #if USE_EXPLICIT_PHI_IN_STACK
1283 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1285 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1286 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1289 /* The in-array can contain NULLs. These were returned by
1290 get_r_value_internal if it reached the same block/definition on a
1291 second path. The NULLs are replaced by the node itself to
1292 simplify the test in the next loop. */
1293 for (i = 0; i < ins; ++i) {
1298 /* This loop checks whether the Phi has more than one predecessor.
1299 If so, it is a real Phi node and we break the loop. Else the Phi
1300 node merges the same definition on several paths and therefore is
1302 for (i = 0; i < ins; ++i)
1304 if (in[i] == res || in[i] == known) continue;
1312 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1314 #if USE_EXPLICIT_PHI_IN_STACK
/* Recycle the unneeded Phi: push it on the reuse stack, or give the
   obstack memory back (only valid because res is the top allocation). */
1315 free_to_Phi_in_stack(res);
1317 obstack_free (current_ir_graph->obst, res);
1321 res = optimize_node (res);
1322 IRN_VRFY_IRG(res, irg);
1325 /* return the pointer to the Phi node. This node might be deallocated! */
1330 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1333 allocates and returns this node. The routine called to allocate the
1334 node might optimize it away and return a real value, or even a pointer
1335 to a deallocated Phi node on top of the obstack!
1336 This function is called with an in-array of proper size. **/
/** phi_merge (slow variant) -- fills `nin` with the value at slot `pos`
    from every predecessor block (via get_r_value_internal) and builds the
    Phi through new_rd_Phi_in.  Caches the result in the block's graph_arr
    only when no value was recorded there yet, so a later set_value is not
    clobbered. */
1338 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1340 ir_node *prevBlock, *res;
1343 /* This loop goes to all predecessor blocks of the block the Phi node is in
1344 and there finds the operands of the Phi node by calling
1345 get_r_value_internal. */
1346 for (i = 1; i <= ins; ++i) {
1347 assert (block->in[i]);
1348 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1350 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1353 /* After collecting all predecessors into the array nin a new Phi node
1354 with these predecessors is created. This constructor contains an
1355 optimization: If all predecessors of the Phi node are identical it
1356 returns the only operand instead of a new Phi node. If the value
1357 passes two different control flow edges without being defined, and
1358 this is the second path treated, a pointer to the node that will be
1359 allocated for the first path (recursion) is returned. We already
1360 know the address of this node, as it is the next node to be allocated
1361 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1362 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1364 /* Now we know the value for "pos" and can enter it in the array with
1365 all known local variables. Attention: this might be a pointer to
1366 a node, that later will be allocated!!! See new_rd_Phi_in.
1367 If this is called in mature, after some set_value in the same block,
1368 the proper value must not be overwritten:
1370 get_value (makes Phi0, puts it into graph_arr)
1371 set_value (overwrites Phi0 in graph_arr)
1372 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1375 if (!block->attr.block.graph_arr[pos]) {
1376 block->attr.block.graph_arr[pos] = res;
1378 /* printf(" value already computed by %s\n",
1379 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1385 /* This function returns the last definition of a variable. In case
1386 this variable was last defined in a previous block, Phi nodes are
1387 inserted. If the part of the firm graph containing the definition
1388 is not yet constructed, a dummy Phi node is returned. */
/** Parameters: block -- block to search backwards from; pos -- variable
    slot in the block's graph_arr; mode -- mode of the requested value. */
1390 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1393 /* There are 4 cases to treat.
1395 1. The block is not mature and we visit it the first time. We can not
1396 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1397 predecessors is returned. This node is added to the linked list (field
1398 "link") of the containing block to be completed when this block is
1399 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1402 2. The value is already known in this block, graph_arr[pos] is set and we
1403 visit the block the first time. We can return the value without
1404 creating any new nodes.
1406 3. The block is mature and we visit it the first time. A Phi node needs
1407 to be created (phi_merge). If the Phi is not needed, as all its
1408 operands are the same value reaching the block through different
1409 paths, it's optimized away and the value itself is returned.
1411 4. The block is mature, and we visit it the second time. Now two
1412 subcases are possible:
1413 * The value was computed completely the last time we were here. This
1414 is the case if there is no loop. We can return the proper value.
1415 * The recursion that visited this node and set the flag did not
1416 return yet. We are computing a value in a loop and need to
1417 break the recursion without knowing the result yet.
1418 @@@ strange case. Straight forward we would create a Phi before
1419 starting the computation of its predecessors. In this case we will
1420 find a Phi here in any case. The problem is that this implementation
1421 only creates a Phi after computing the predecessors, so that it is
1422 hard to compute self references of this Phi. @@@
1423 There is no simple check for the second subcase. Therefore we check
1424 for a second visit and treat all such cases as the second subcase.
1425 Anyways, the basic situation is the same: we reached a block
1426 on two paths without finding a definition of the value: No Phi
1427 nodes are needed on both paths.
1428 We return this information "Two paths, no Phi needed" by a very tricky
1429 implementation that relies on the fact that an obstack is a stack and
1430 will return a node with the same address on different allocations.
1431 Look also at phi_merge and new_rd_phi_in to understand this.
1432 @@@ Unfortunately this does not work, see testprogram
1433 three_cfpred_example.
1437 /* case 4 -- already visited. */
1438 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1440 /* visited the first time */
1441 set_irn_visited(block, get_irg_visited(current_ir_graph));
1443 /* Get the local valid value */
1444 res = block->attr.block.graph_arr[pos];
1446 /* case 2 -- If the value is actually computed, return it. */
1447 if (res) return res;
1449 if (block->attr.block.matured) { /* case 3 */
1451 /* The Phi has the same amount of ins as the corresponding block. */
1452 int ins = get_irn_arity(block);
1454 NEW_ARR_A (ir_node *, nin, ins);
1456 /* Phi merge collects the predecessors and then creates a node. */
1457 res = phi_merge (block, pos, mode, nin, ins);
1459 } else { /* case 1 */
1460 /* The block is not mature, we don't know how many in's are needed. A Phi
1461 with zero predecessors is created. Such a Phi node is called Phi0
1462 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1463 to the list of Phi0 nodes in this block to be matured by mature_block
1465 The Phi0 has to remember the pos of its internal value. If the real
1466 Phi is computed, pos is used to update the array with the local
1469 res = new_rd_Phi0 (current_ir_graph, block, mode);
1470 res->attr.phi0_pos = pos;
1471 res->link = block->link;
1475 /* If we get here, the frontend missed a use-before-definition error */
1478 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1479 assert (mode->code >= irm_F && mode->code <= irm_P);
1480 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1481 tarval_mode_null[mode->code]);
1484 /* The local valid value is available now. */
1485 block->attr.block.graph_arr[pos] = res;
1493 it starts the recursion. This causes an Id at the entry of
1494 every block that has no definition of the value! **/
1496 #if USE_EXPLICIT_PHI_IN_STACK
1498 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1499 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/** new_rd_Phi_in (fast variant) -- like the slow variant, but additionally
    takes `phi0`, the Phi0 placeholder this Phi will replace, so that
    self-references still pointing at the Phi0 can be redirected to the new
    node immediately.  Bad predecessors are ignored when deciding whether a
    real Phi is needed.
    NOTE(review): some original lines (braces, #else branches) are elided
    in this excerpt. */
1502 static INLINE ir_node *
1503 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1504 ir_node **in, int ins, ir_node *phi0)
1507 ir_node *res, *known;
1509 /* Allocate a new node on the obstack. The allocation copies the in
1511 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1512 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1514 /* This loop checks whether the Phi has more than one predecessor.
1515 If so, it is a real Phi node and we break the loop. Else the
1516 Phi node merges the same definition on several paths and therefore
1517 is not needed. Don't consider Bad nodes! */
1519 for (i=0; i < ins; ++i)
1523 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1525 /* Optimize self referencing Phis: We can't detect them yet properly, as
1526 they still refer to the Phi0 they will replace. So replace right now. */
1527 if (phi0 && in[i] == phi0) in[i] = res;
1529 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1537 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1540 obstack_free (current_ir_graph->obst, res);
1541 if (is_Phi(known)) {
1542 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1543 order, an enclosing Phi node may become superfluous. */
1544 res = optimize_in_place_2(known);
1545 if (res != known) { exchange(known, res); }
1550 /* An undefined value, e.g., in unreachable code. */
1554 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1555 IRN_VRFY_IRG(res, irg);
1556 /* Memory Phis in endless loops must be kept alive.
1557 As we can't distinguish these easily we keep all of them alive. */
1558 if ((res->op == op_Phi) && (mode == mode_M))
1559 add_End_keepalive(irg->end, res);
1566 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1568 #if PRECISE_EXC_CONTEXT
1570 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1572 /* Construct a new frag_array for node n.
1573 Copy the content from the current graph_arr of the corresponding block:
1574 this is the current state.
1575 Set ProjM(n) as current memory state.
1576 Further the last entry in frag_arr of current block points to n. This
1577 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1579 static INLINE ir_node ** new_frag_arr (ir_node *n)
1584 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1585 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1586 sizeof(ir_node *)*current_ir_graph->n_loc);
1588 /* turn off optimization before allocating Proj nodes, as res isn't
1590 opt = get_opt_optimize(); set_optimize(0);
1591 /* Here we rely on the fact that all frag ops have Memory as first result! */
1592 if (get_irn_op(n) == op_Call)
1593 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* For all non-Call fragile ops the memory Proj numbers coincide; the
   assert below verifies that before pn_Alloc_M is used for any of them. */
1595 assert((pn_Quot_M == pn_DivMod_M) &&
1596 (pn_Quot_M == pn_Div_M) &&
1597 (pn_Quot_M == pn_Mod_M) &&
1598 (pn_Quot_M == pn_Load_M) &&
1599 (pn_Quot_M == pn_Store_M) &&
1600 (pn_Quot_M == pn_Alloc_M) );
1601 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain this frag op into the current block via the last graph_arr slot. */
1605 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/** Returns the frag_arr attached to the fragile op n; where the array
    lives in the attribute union depends on the opcode. */
1609 static INLINE ir_node **
1610 get_frag_arr (ir_node *n) {
1611 if (get_irn_op(n) == op_Call) {
1612 return n->attr.call.frag_arr;
1613 } else if (get_irn_op(n) == op_Alloc) {
1614 return n->attr.a.frag_arr;
1616 return n->attr.frag_arr;
/** Records val at slot pos in frag_arr and in every frag array chained
    behind it (the chain runs through the last graph_arr slot), never
    overwriting an existing entry.  Two variants are visible below -- a
    recursive one and a bounded-iteration one; the preprocessor lines
    selecting between them are elided in this excerpt. */
1621 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1623 if (!frag_arr[pos]) frag_arr[pos] = val;
1624 if (frag_arr[current_ir_graph->n_loc - 1]) {
1625 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1626 assert(arr != frag_arr && "Endless recursion detected");
1627 set_frag_value(arr, pos, val);
/* Iterative variant: 1000 iterations is a safety bound on the chain length. */
1632 for (i = 0; i < 1000; ++i) {
1633 if (!frag_arr[pos]) {
1634 frag_arr[pos] = val;
1636 if (frag_arr[current_ir_graph->n_loc - 1]) {
1637 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1643 assert(0 && "potential endless recursion");
/** Looks up the value at slot pos as seen right after the fragile op cfOp.
    If a set_value in `block` shadowed the frag value, a Phi is built
    (phi_merge for mature blocks, Phi0 otherwise); if the frag array has no
    entry either, falls back to the normal get_r_value_internal. */
1648 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1652 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1654 frag_arr = get_frag_arr(cfOp);
1655 res = frag_arr[pos];
1657 if (block->attr.block.graph_arr[pos]) {
1658 /* There was a set_value after the cfOp and no get_value before that
1659 set_value. We must build a Phi node now. */
1660 if (block->attr.block.matured) {
1661 int ins = get_irn_arity(block);
1663 NEW_ARR_A (ir_node *, nin, ins);
1664 res = phi_merge(block, pos, mode, nin, ins);
1666 res = new_rd_Phi0 (current_ir_graph, block, mode);
1667 res->attr.phi0_pos = pos;
1668 res->link = block->link;
1672 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1673 but this should be better: (remove comment if this works) */
1674 /* It's a Phi, we can write this into all graph_arrs with NULL */
1675 set_frag_value(block->attr.block.graph_arr, pos, res);
1677 res = get_r_value_internal(block, pos, mode);
1678 set_frag_value(block->attr.block.graph_arr, pos, res);
1686 computes the predecessors for the real phi node, and then
1687 allocates and returns this node. The routine called to allocate the
1688 node might optimize it away and return a real value.
1689 This function must be called with an in-array of proper size. **/
/** phi_merge (fast variant) -- first plants a Phi0 placeholder in
    graph_arr[pos] to break recursion, then gathers the operands from all
    predecessor blocks (honoring precise exception contexts via
    get_r_frag_value_internal), and finally builds the real Phi, exchanging
    the placeholder if one was allocated here. */
1691 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1693 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1696 /* If this block has no value at pos create a Phi0 and remember it
1697 in graph_arr to break recursions.
1698 Else we may not set graph_arr as a later value is remembered there. */
1700 if (!block->attr.block.graph_arr[pos]) {
1701 if (block == get_irg_start_block(current_ir_graph)) {
1702 /* Collapsing to Bad tarvals is no good idea.
1703 So we call a user-supplied routine here that deals with this case as
1704 appropriate for the given language. Sadly the only help we can give
1705 here is the position.
1707 Even if all variables are defined before use, it can happen that
1708 we get to the start block, if a cond has been replaced by a tuple
1709 (bad, jmp). In this case we call the function needlessly, eventually
1710 generating a non-existent error.
1711 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1714 if (default_initialize_local_variable)
1715 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1717 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1718 /* We don't need to care about exception ops in the start block.
1719 There are none by definition. */
1720 return block->attr.block.graph_arr[pos];
1722 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1723 block->attr.block.graph_arr[pos] = phi0;
1724 #if PRECISE_EXC_CONTEXT
1725 if (get_opt_precise_exc_context()) {
1726 /* Set graph_arr for fragile ops. Also here we should break recursion.
1727 We could choose a cyclic path through a cfop. But the recursion would
1728 break at some point. */
1729 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1735 /* This loop goes to all predecessor blocks of the block the Phi node
1736 is in and there finds the operands of the Phi node by calling
1737 get_r_value_internal. */
1738 for (i = 1; i <= ins; ++i) {
1739 prevCfOp = skip_Proj(block->in[i]);
1741 if (is_Bad(prevCfOp)) {
1742 /* In case a Cond has been optimized we would get right to the start block
1743 with an invalid definition. */
1744 nin[i-1] = new_Bad();
1747 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1749 if (!is_Bad(prevBlock)) {
1750 #if PRECISE_EXC_CONTEXT
1751 if (get_opt_precise_exc_context() &&
1752 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1753 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1754 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1757 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1759 nin[i-1] = new_Bad();
1763 /* We want to pass the Phi0 node to the constructor: this finds additional
1764 optimization possibilities.
1765 The Phi0 node either is allocated in this function, or it comes from
1766 a former call to get_r_value_internal. In this case we may not yet
1767 exchange phi0, as this is done in mature_block. */
1769 phi0_all = block->attr.block.graph_arr[pos];
1770 if (!((get_irn_op(phi0_all) == op_Phi) &&
1771 (get_irn_arity(phi0_all) == 0) &&
1772 (get_nodes_block(phi0_all) == block)))
1778 /* After collecting all predecessors into the array nin a new Phi node
1779 with these predecessors is created. This constructor contains an
1780 optimization: If all predecessors of the Phi node are identical it
1781 returns the only operand instead of a new Phi node. */
1782 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1784 /* In case we allocated a Phi0 node at the beginning of this procedure,
1785 we need to exchange this Phi0 with the real Phi. */
1787 exchange(phi0, res);
1788 block->attr.block.graph_arr[pos] = res;
1789 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1790 only an optimization. */
1796 /* This function returns the last definition of a variable. In case
1797 this variable was last defined in a previous block, Phi nodes are
1798 inserted. If the part of the firm graph containing the definition
1799 is not yet constructed, a dummy Phi node is returned. */
/** Fast variant: unlike the slow algorithm, a second visit never returns
    NULL -- the Phi0 placed by phi_merge guarantees graph_arr[pos] is set. */
1801 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1804 /* There are 4 cases to treat.
1806 1. The block is not mature and we visit it the first time. We can not
1807 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1808 predecessors is returned. This node is added to the linked list (field
1809 "link") of the containing block to be completed when this block is
1810 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1813 2. The value is already known in this block, graph_arr[pos] is set and we
1814 visit the block the first time. We can return the value without
1815 creating any new nodes.
1817 3. The block is mature and we visit it the first time. A Phi node needs
1818 to be created (phi_merge). If the Phi is not needed, as all its
1819 operands are the same value reaching the block through different
1820 paths, it's optimized away and the value itself is returned.
1822 4. The block is mature, and we visit it the second time. Now two
1823 subcases are possible:
1824 * The value was computed completely the last time we were here. This
1825 is the case if there is no loop. We can return the proper value.
1826 * The recursion that visited this node and set the flag did not
1827 return yet. We are computing a value in a loop and need to
1828 break the recursion. This case only happens if we visited
1829 the same block with phi_merge before, which inserted a Phi0.
1830 So we return the Phi0.
1833 /* case 4 -- already visited. */
1834 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1835 /* As phi_merge allocates a Phi0 this value is always defined. Here
1836 is the critical difference of the two algorithms. */
1837 assert(block->attr.block.graph_arr[pos]);
1838 return block->attr.block.graph_arr[pos];
1841 /* visited the first time */
1842 set_irn_visited(block, get_irg_visited(current_ir_graph));
1844 /* Get the local valid value */
1845 res = block->attr.block.graph_arr[pos];
1847 /* case 2 -- If the value is actually computed, return it. */
1848 if (res) { return res; };
1850 if (block->attr.block.matured) { /* case 3 */
1852 /* The Phi has the same amount of ins as the corresponding block. */
1853 int ins = get_irn_arity(block);
1855 NEW_ARR_A (ir_node *, nin, ins);
1857 /* Phi merge collects the predecessors and then creates a node. */
1858 res = phi_merge (block, pos, mode, nin, ins);
1860 } else { /* case 1 */
1861 /* The block is not mature, we don't know how many in's are needed. A Phi
1862 with zero predecessors is created. Such a Phi node is called Phi0
1863 node. The Phi0 is then added to the list of Phi0 nodes in this block
1864 to be matured by mature_block later.
1865 The Phi0 has to remember the pos of its internal value. If the real
1866 Phi is computed, pos is used to update the array with the local
1868 res = new_rd_Phi0 (current_ir_graph, block, mode);
1869 res->attr.phi0_pos = pos;
1870 res->link = block->link;
1874 /* If we get here, the frontend missed a use-before-definition error */
1877 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1878 assert (mode->code >= irm_F && mode->code <= irm_P);
1879 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1880 get_mode_null(mode));
1883 /* The local valid value is available now. */
1884 block->attr.block.graph_arr[pos] = res;
1889 #endif /* USE_FAST_PHI_CONSTRUCTION */
1891 /* ************************************************************************** */
1893 /** Finalize a Block node, when all control flows are known. */
1894 /** Acceptable parameters are only Block nodes. */
/** Matures `block`: fixes the backedge array to the final arity, walks the
    linked list of Phi0 placeholders attached via block->link and replaces
    each with a real Phi built by phi_merge, then marks the block matured
    and optimizes it in place. */
1896 mature_block (ir_node *block)
1903 assert (get_irn_opcode(block) == iro_Block);
1904 /* @@@ should be commented in
1905 assert (!get_Block_matured(block) && "Block already matured"); */
1907 if (!get_Block_matured(block)) {
1908 ins = ARR_LEN (block->in)-1;
1909 /* Fix block parameters */
1910 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1912 /* An array for building the Phi nodes. */
1913 NEW_ARR_A (ir_node *, nin, ins);
1915 /* Traverse a chain of Phi nodes attached to this block and mature
1917 for (n = block->link; n; n=next) {
1918 inc_irg_visited(current_ir_graph);
1920 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1923 block->attr.block.matured = 1;
1925 /* Now, as the block is a finished firm node, we can optimize it.
1926 Since other nodes have been allocated since the block was created
1927 we can not free the node on the obstack. Therefore we have to call
1929 Unfortunately the optimization does not change a lot, as all allocated
1930 nodes refer to the unoptimized node.
1931 We can call _2, as global cse has no effect on blocks. */
1932 block = optimize_in_place_2(block);
1933 IRN_VRFY_IRG(block, current_ir_graph);
/* -- Comfortable creation interface: each new_d_* wrapper builds the node
   in current_ir_graph's current block via the corresponding new_rd_*
   constructor (constants go into the start block). -- */
1938 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1940 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
/* Constants are always placed in the start block. */
1945 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1947 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1952 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1954 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1960 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1962 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1967 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1969 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond: marks the Cond as fragmentary and records the
   default projection number before building the mode_X Proj. */
1974 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1977 assert(arg->op == op_Cond);
1978 arg->attr.c.kind = fragmentary;
1979 arg->attr.c.default_proj = max_proj;
1980 res = new_Proj (arg, mode_X, max_proj);
1985 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1987 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1992 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1994 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1998 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2000 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2005 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2007 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2012 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2014 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2020 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2022 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2027 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2029 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2034 * allocate the frag array
/** Attaches a fresh frag array to res (through *frag_store) when precise
    exception context is enabled, the graph is still being built, res still
    has the expected opcode (it could have been optimized into something
    else), and no array is present yet (it could be a CSE hit). */
2036 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2037 if (get_opt_precise_exc_context()) {
2038 if ((current_ir_graph->phase_state == phase_building) &&
2039 (get_irn_op(res) == op) && /* Could be optimized away. */
2040 !*frag_store) /* Could be a cse where the arr is already set. */ {
2041 *frag_store = new_frag_arr(res);
/* Fragile division-family constructors: each also allocates a frag array
   when precise exception context is enabled. */
2048 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2051 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2053 #if PRECISE_EXC_CONTEXT
2054 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2061 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2064 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2066 #if PRECISE_EXC_CONTEXT
2067 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2074 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2077 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2079 #if PRECISE_EXC_CONTEXT
2080 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2087 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2090 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2092 #if PRECISE_EXC_CONTEXT
2093 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
/* Bitwise, shift, compare and simple control-flow wrappers: all delegate
   to the new_rd_* constructors in the current block. */
2100 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2102 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2107 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2109 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2114 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2116 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2121 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2123 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2128 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2130 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2135 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2137 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2142 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2144 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2149 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2151 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2156 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2158 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2163 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2165 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2170 new_d_Jmp (dbg_info* db)
2172 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2176 new_d_Cond (dbg_info* db, ir_node *c)
2178 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory/call wrappers; the fragile ops (Call, Load, Store, Alloc) also
   receive a frag array for precise exception contexts. */
2182 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2186 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2187 store, callee, arity, in, tp);
2188 #if PRECISE_EXC_CONTEXT
2189 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
2196 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2198 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2203 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2205 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2210 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2213 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2215 #if PRECISE_EXC_CONTEXT
2216 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2223 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2226 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2228 #if PRECISE_EXC_CONTEXT
2229 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2236 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2240 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2241 store, size, alloc_type, where);
2242 #if PRECISE_EXC_CONTEXT
2243 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2250 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2252 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2253 store, ptr, size, free_type);
/* simpleSel is a Sel without index operands (0, NULL). */
2257 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2258 /* GL: objptr was called frame before. Frame was a bad choice for the name
2259 as the operand could as well be a pointer to a dynamic object. */
2261 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2262 store, objptr, 0, NULL, ent);
2266 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2268 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2269 store, objptr, n_index, index, sel);
2273 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2275 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2276 store, objptr, ent));
/* SymConsts, like Consts, are placed in the start block. */
2280 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2282 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2287 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2289 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2294 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2296 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2304 return __new_d_Bad();
/* Confirm/Unknown and interprocedural-view constructors (CallBegin,
   EndReg, EndExcept, Break, Filter, FuncCall). */
2308 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2310 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2315 new_d_Unknown (ir_mode *m)
2317 return new_rd_Unknown(current_ir_graph, m);
2321 new_d_CallBegin (dbg_info *db, ir_node *call)
2324 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2329 new_d_EndReg (dbg_info *db)
2332 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2337 new_d_EndExcept (dbg_info *db)
2340 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2345 new_d_Break (dbg_info *db)
2347 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2351 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2353 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2358 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2362 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2363 callee, arity, in, tp);
2368 /* ********************************************************************* */
2369 /* Comfortable interface with automatic Phi node construction. */
2370 /* (Uses also constructors of the ?? interface, except new_Block.) */
2371 /* ********************************************************************* */
2373 /* * Block construction **/
2374 /* immature Block without predecessors */
/** Creates an immature Block (dynamic in-array, arity -1), makes it the
    current block, and allocates the zero-initialized graph_arr used for
    automatic Phi construction.  The block must later be finished with
    mature_block. */
2375 ir_node *new_d_immBlock (dbg_info* db) {
2378 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2379 /* creates a new dynamic in-array as length of in is -1 */
2380 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2381 current_ir_graph->current_block = res;
2382 res->attr.block.matured = 0;
2383 /* res->attr.block.exc = exc_normal; */
2384 /* res->attr.block.handler_entry = 0; */
2385 res->attr.block.irg = current_ir_graph;
2386 res->attr.block.backedge = NULL;
2387 res->attr.block.in_cg = NULL;
2388 res->attr.block.cg_backedge = NULL;
2389 set_Block_block_visited(res, 0);
2391 /* Create and initialize array for Phi-node construction. */
2392 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2393 current_ir_graph->n_loc);
2394 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2396 /* Immature block may not be optimized! */
2397 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper for new_d_immBlock without debug info. */
2403 new_immBlock (void) {
2404 return new_d_immBlock(NULL);
2407 /* add an edge to a jmp/control flow node */
/** Appends control-flow predecessor jmp to the (still immature) block's
    dynamic in-array. */
2409 add_in_edge (ir_node *block, ir_node *jmp)
2411 if (block->attr.block.matured) {
2412 assert(0 && "Error: Block already matured!\n");
2415 assert(jmp != NULL);
2416 ARR_APP1(ir_node *, block->in, jmp);
2420 /* changing the current block */
/** Makes target the current block of current_ir_graph. */
2422 switch_block (ir_node *target)
2424 current_ir_graph->current_block = target;
2427 /* ************************ */
2428 /* parameter administration */
2430 /* get a value from the parameter array from the current block by its index */
/* Returns the value of the local variable with index pos in the current
   block, with the given mode.  Triggers automatic Phi-node construction
   via get_r_value_internal. */
get_d_value (dbg_info* db, int pos, ir_mode *mode)
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* visited flags are used by get_r_value_internal to cut cycles */
  inc_irg_visited(current_ir_graph);
  /* pos + 1: slot 0 of graph_arr is reserved for the store (see get_store) */
  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2439 /* get a value from the parameter array from the current block by its index */
/* Same as get_d_value, without debug info. */
get_value (int pos, ir_mode *mode)
  return get_d_value(NULL, pos, mode);
2446 /* set a value at position pos in the parameter array from the current block */
/* Stores value as the current definition of local variable pos in the
   current block. */
set_value (int pos, ir_node *value)
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* n_loc counts slot 0 (the store), so valid variable slots are 1..n_loc-1 */
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* get the current store */
/* Returns the current memory state (mode_M) of the current block;
   kept in graph_arr slot 0, Phi construction handled like any value. */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2465 /* set the current store */
/* Sets store as the current memory state of the current block
   (graph_arr slot 0). */
set_store (ir_node *store)
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps ka alive: adds it to the keep-alive edges of the graph's End
   node so it is not removed as dead code. */
keep_alive (ir_node *ka)
  add_End_keepalive(current_ir_graph->end, ka);
/** Useful access routines **/
/* Returns the current block of the current graph.  To set the current
   block use switch_block(). */
ir_node *get_cur_block() {
  return get_irg_current_block(current_ir_graph);
/* Returns the frame type of the current graph */
type *get_cur_frame_type() {
  return get_irg_frame_type(current_ir_graph);
2493 /* ********************************************************************* */
/* call once for each run of the library */
/* Installs the language-dependent callback used to initialize local
   variables that are read before being written (see the file-scope
   default_initialize_local_variable).  May be NULL. */
init_cons (default_initialize_local_variable_func_t *func)
  default_initialize_local_variable = func;
/* call for each graph */
/* Marks the graph as fully constructed: phase switches from
   phase_building to phase_high; the building-only interfaces above
   assert on this state and must not be used afterwards. */
finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
/* Convenience constructors without debug info: each delegates to the
   corresponding new_d_* constructor with db == NULL, building in the
   current block of current_ir_graph.
   Control flow, constant, Sel and Call constructors: */
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
ir_node *new_Start (void) {
  return new_d_Start(NULL);
ir_node *new_End (void) {
  return new_d_End(NULL);
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
  return new_d_Call(NULL, store, callee, arity, in, tp);
/* Arithmetic, logical and shift constructors (no debug info).
   Quot/DivMod/Div/Mod additionally take a memory operand. */
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
/* new_Rotate is provided as an alias spelling of new_Rot. */
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
/* Memory-operation and projection constructors (no debug info). */
ir_node *new_Load (ir_node *store, ir_node *addr) {
  return new_d_Load(NULL, store, addr);
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
  return new_d_Free(NULL, store, ptr, size, free_type);
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
/* Remaining constructors: Bad/Confirm/Unknown and the interprocedural
   view nodes (CallBegin, EndReg, EndExcept, Break, Filter, FuncCall),
   all without debug info. */
ir_node *new_Bad (void) {
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
ir_node *new_Break (void) {
  return new_d_Break(NULL);
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
  return new_d_FuncCall(NULL, callee, arity, in, tp);