3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependent initialization variable
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block node has no containing block itself (NULL), mode_BB. */
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are known up front, so the block is born matured. */
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
/* One back-edge flag per predecessor, allocated on the graph's obstack. */
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call-graph) predecessor data starts out unset. */
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);

/* Constructs a Start node: mode_T, no predecessors, in the given block. */
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);

/* Constructs an End node.  Arity -1 requests a dynamic in-array so that
   keep-alive edges can be appended later (see add_End_keepalive). */
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* A mature block must have exactly one Phi operand per CFG predecessor. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown nodes; presumably has_unknown is set in the
   loop body (not visible here) so optimization is skipped in that case. */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* Constructs a Const node with an explicit type.
   NOTE(review): the node is placed in irg->start_block, not in the
   'block' argument -- presumably intentional (constants are placed in
   the start block regardless of caller's block); confirm. */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL)
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
/* optimize_node may return an existing (CSE'd) node; it must agree on type. */
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);

/* Constructs a Const node, deriving a pointer type when the tarval carries
   an entity, otherwise defaulting to unknown_type. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 if (tarval_is_entity(con))
159 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
161 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node (transparent copy of val in the given mode). */
165 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
169 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
170 res = optimize_node(res);
171 IRN_VRFY_IRG(res, irg);

/* Constructs a Proj node selecting one result of a mode_T predecessor. */
176 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
181 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
182 res->attr.proj = proj;
/* Sanity: the projected node and its block must already exist. */
185 assert(get_Proj_pred(res));
186 assert(get_nodes_Block(get_Proj_pred(res)));
188 res = optimize_node(res);
190 IRN_VRFY_IRG(res, irg);

/* Constructs the default Proj of a Cond node and marks the Cond as
   fragmentary with the given default projection number. */
196 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
200 assert(arg->op == op_Cond);
201 arg->attr.c.kind = fragmentary;
202 arg->attr.c.default_proj = max_proj;
203 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);

/* Constructs a Conv node converting op to the given mode. */
208 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
212 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
213 res = optimize_node(res);
214 IRN_VRFY_IRG(res, irg);

/* Constructs a Cast node: high-level type change, mode stays that of op. */
219 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
223 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
224 res->attr.cast.totype = to_tp;
225 res = optimize_node(res);
226 IRN_VRFY_IRG(res, irg);

/* Constructs a Tuple node (mode_T) grouping arity values. */
231 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
235 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
236 res = optimize_node (res);
237 IRN_VRFY_IRG(res, irg);
/* The arithmetic/logic constructors below all follow the same pattern:
   pack the operands into a local 'in' array (declaration not visible in
   this extract), build the node, run local optimization, verify.
   Quot/DivMod/Div/Mod additionally take a memory operand and yield a
   mode_T tuple (result(s) + memory + exception control flow). */

242 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
243 ir_node *op1, ir_node *op2, ir_mode *mode)
250 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
251 res = optimize_node(res);
252 IRN_VRFY_IRG(res, irg);

257 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
258 ir_node *op1, ir_node *op2, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
266 res = optimize_node (res);
267 IRN_VRFY_IRG(res, irg);

/* Unary arithmetic negation. */
272 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op, ir_mode *mode)
277 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
278 res = optimize_node(res);
279 IRN_VRFY_IRG(res, irg);

284 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
285 ir_node *op1, ir_node *op2, ir_mode *mode)
292 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
293 res = optimize_node(res);
294 IRN_VRFY_IRG(res, irg);

/* Division with memory operand: 3 inputs (memop, op1, op2), mode_T result. */
299 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
300 ir_node *memop, ir_node *op1, ir_node *op2)
308 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
309 res = optimize_node(res);
310 IRN_VRFY_IRG(res, irg);

315 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
325 res = optimize_node(res);
326 IRN_VRFY_IRG(res, irg);

331 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
340 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);

347 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *memop, ir_node *op1, ir_node *op2)
356 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);

/* Bitwise operations. */
363 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
364 ir_node *op1, ir_node *op2, ir_mode *mode)
371 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
372 res = optimize_node(res);
373 IRN_VRFY_IRG(res, irg);

378 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
379 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
387 res = optimize_node(res);
388 IRN_VRFY_IRG(res, irg);

393 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op1, ir_node *op2, ir_mode *mode)
401 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
402 res = optimize_node (res);
403 IRN_VRFY_IRG(res, irg);

408 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
409 ir_node *op, ir_mode *mode)
413 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
414 res = optimize_node(res);
415 IRN_VRFY_IRG(res, irg);

/* Shift/rotate operations: op shifted/rotated by k. */
420 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
421 ir_node *op, ir_node *k, ir_mode *mode)
428 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
429 res = optimize_node(res);
430 IRN_VRFY_IRG(res, irg);

435 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
436 ir_node *op, ir_node *k, ir_mode *mode)
443 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
444 res = optimize_node(res);
445 IRN_VRFY_IRG(res, irg);

450 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
451 ir_node *op, ir_node *k, ir_mode *mode)
458 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
459 res = optimize_node(res);
460 IRN_VRFY_IRG(res, irg);

465 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
466 ir_node *op, ir_node *k, ir_mode *mode)
473 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
474 res = optimize_node(res);
475 IRN_VRFY_IRG(res, irg);

480 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
481 ir_node *op, ir_mode *mode)
485 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
486 res = optimize_node (res);
487 IRN_VRFY_IRG(res, irg);

/* Comparison: yields a mode_T node; results selected via Proj. */
492 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
493 ir_node *op1, ir_node *op2)
500 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
501 res = optimize_node(res);
502 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X, no data inputs). */
507 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
511 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
512 res = optimize_node (res);
513 IRN_VRFY_IRG (res, irg);

/* Constructs a conditional branch on c; successors selected via Proj.
   Starts as a dense Cond (kind may be changed by new_rd_defaultProj). */
518 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
522 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
523 res->attr.c.kind = dense;
524 res->attr.c.default_proj = 0;
525 res = optimize_node (res);
526 IRN_VRFY_IRG(res, irg);

/* Constructs a Call: the real in-array r_in prepends store and callee
   (presumably at r_in[0]/r_in[1]; assignment lines not visible here)
   before the arity actual arguments copied in at offset 2. */
531 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
532 ir_node *callee, int arity, ir_node **in, type *tp)
539 NEW_ARR_A(ir_node *, r_in, r_arity);
542 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
544 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type; checked before it is attached. */
546 assert(is_method_type(tp));
547 set_Call_type(res, tp);
548 res->attr.call.callee_arr = NULL;
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);

/* Constructs a Return: store is prepended (offset 1) before the arity
   result values. */
555 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
556 ir_node *store, int arity, ir_node **in)
563 NEW_ARR_A (ir_node *, r_in, r_arity);
565 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
566 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);

/* Constructs a Raise (exception throw) with store and exception object. */
573 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
580 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
581 res = optimize_node(res);
582 IRN_VRFY_IRG(res, irg);
/* Constructs a Load from address adr with memory operand store.
   Result is a mode_T tuple (value/memory/exception via Proj). */
587 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
588 ir_node *store, ir_node *adr)
595 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);

/* Constructs a Store of val to address adr with memory operand store. */
602 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
603 ir_node *store, ir_node *adr, ir_node *val)
611 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);

/* Constructs an Alloc of 'size' bytes of alloc_type, on stack or heap
   depending on 'where'. */
618 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *size, type *alloc_type, where_alloc where)
626 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
627 res->attr.a.where = where;
628 res->attr.a.type = alloc_type;
629 res = optimize_node(res);
630 IRN_VRFY_IRG(res, irg);

/* Constructs a Free of the object at ptr with the given size and type. */
635 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
636 ir_node *ptr, ir_node *size, type *free_type)
644 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
645 res->attr.f = free_type;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);

/* Constructs a Sel (entity selection / address computation): store,
   objptr and the arity index expressions are packed into r_in, the
   selected entity is stored in the node attributes. */
652 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
653 int arity, ir_node **in, entity *ent)
659 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
662 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
665 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
666 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
667 res->attr.s.ent = ent;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);

/* Constructs an InstOf (dynamic type test).
   NOTE(review): the node is built with op_Sel, not op_InstOf -- looks
   suspicious; confirm against the op table before relying on it.
   Optimization is deliberately disabled (commented out below). */
674 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
675 ir_node *objptr, type *ent)
682 NEW_ARR_A(ir_node *, r_in, r_arity);
686 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
687 res->attr.io.ent = ent;
689 /* res = optimize(res); */
690 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst with an explicit type.  The result mode is
   chosen from the symconst kind (address kinds presumably get a pointer
   mode; the assignments are not visible in this extract). */
695 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
696 symconst_kind symkind, type *tp)
701 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
705 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
707 res->attr.i.num = symkind;
708 res->attr.i.sym = value;
711 res = optimize_node(res);
712 IRN_VRFY_IRG(res, irg);

/* Convenience wrapper: SymConst with unknown_type. */
717 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind)
720 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);

/* Constructs a Sync node merging arity memory values into one (mode_M). */
725 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
729 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
730 res = optimize_node(res);
731 IRN_VRFY_IRG(res, irg);

/* Returns the graph's Bad node (body not visible in this extract). */
736 new_rd_Bad (ir_graph *irg)

/* Constructs a Confirm: asserts that 'val cmp bound' holds from here on;
   mode is inherited from val. */
742 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
744 ir_node *in[2], *res;
748 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
749 res->attr.confirm_cmp = cmp;
750 res = optimize_node (res);
751 IRN_VRFY_IRG(res, irg);

/* Constructs an Unknown of mode m, placed in the start block, without
   debug info or optimization. */
756 new_rd_Unknown (ir_graph *irg, ir_mode *m)
758 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);

/* Interprocedural view: CallBegin node referencing the original Call. */
762 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
767 in[0] = get_Call_ptr(call);
768 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
769 /* res->attr.callbegin.irg = irg; */
770 res->attr.callbegin.call = call;
771 res = optimize_node(res);
772 IRN_VRFY_IRG(res, irg);

/* Interprocedural view: EndReg; arity -1 keeps the in-array dynamic. */
777 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
781 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
783 IRN_VRFY_IRG(res, irg);

/* Interprocedural view: EndExcept; registered as the graph's end_except. */
788 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
792 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
793 irg->end_except = res;
794 IRN_VRFY_IRG (res, irg);

/* Interprocedural view: Break, a Jmp-like node (mode_X, no inputs). */
799 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
803 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
804 res = optimize_node(res);
805 IRN_VRFY_IRG(res, irg);

/* Interprocedural view: Filter, the interprocedural analogue of Proj. */
810 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
815 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
816 res->attr.filter.proj = proj;
817 res->attr.filter.in_cg = NULL;
818 res->attr.filter.backedge = NULL;
821 assert(get_Proj_pred(res));
822 assert(get_nodes_Block(get_Proj_pred(res)));
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);

/* Constructs a FuncCall (call without memory operand): callee is
   presumably prepended at r_in[0] (assignment not visible) before the
   arity arguments copied in at offset 1. */
831 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
832 ir_node *callee, int arity, ir_node **in, type *tp)
839 NEW_ARR_A(ir_node *, r_in, r_arity);
841 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
843 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
845 assert(is_method_type(tp));
846 set_FuncCall_type(res, tp);
847 res->attr.call.callee_arr = NULL;
848 res = optimize_node(res);
849 IRN_VRFY_IRG(res, irg);
/* Convenience wrappers: each new_r_* constructor forwards to the
   corresponding new_rd_* constructor with a NULL dbg_info. */
854 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
855 return new_rd_Block(NULL, irg, arity, in);
857 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
858 return new_rd_Start(NULL, irg, block);
860 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
861 return new_rd_End(NULL, irg, block);
863 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
864 return new_rd_Jmp(NULL, irg, block);
866 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
867 return new_rd_Cond(NULL, irg, block, c);
869 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
870 ir_node *store, int arity, ir_node **in) {
871 return new_rd_Return(NULL, irg, block, store, arity, in);
873 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
874 ir_node *store, ir_node *obj) {
875 return new_rd_Raise(NULL, irg, block, store, obj);
877 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
878 ir_mode *mode, tarval *con) {
879 return new_rd_Const(NULL, irg, block, mode, con);
881 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
882 symconst_symbol value, symconst_kind symkind) {
883 return new_rd_SymConst(NULL, irg, block, value, symkind);
885 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
886 ir_node *objptr, int n_index, ir_node **index,
888 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
890 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
892 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
894 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
895 ir_node *callee, int arity, ir_node **in,
897 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
899 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
900 ir_node *op1, ir_node *op2, ir_mode *mode) {
901 return new_rd_Add(NULL, irg, block, op1, op2, mode);
903 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
904 ir_node *op1, ir_node *op2, ir_mode *mode) {
905 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
907 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
908 ir_node *op, ir_mode *mode) {
909 return new_rd_Minus(NULL, irg, block, op, mode);
911 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
912 ir_node *op1, ir_node *op2, ir_mode *mode) {
913 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
915 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
916 ir_node *memop, ir_node *op1, ir_node *op2) {
917 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
919 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
920 ir_node *memop, ir_node *op1, ir_node *op2) {
921 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
923 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
924 ir_node *memop, ir_node *op1, ir_node *op2) {
925 return new_rd_Div(NULL, irg, block, memop, op1, op2);
927 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
928 ir_node *memop, ir_node *op1, ir_node *op2) {
929 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
931 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
932 ir_node *op, ir_mode *mode) {
933 return new_rd_Abs(NULL, irg, block, op, mode);
935 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
936 ir_node *op1, ir_node *op2, ir_mode *mode) {
937 return new_rd_And(NULL, irg, block, op1, op2, mode);
939 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
940 ir_node *op1, ir_node *op2, ir_mode *mode) {
941 return new_rd_Or(NULL, irg, block, op1, op2, mode);
943 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
944 ir_node *op1, ir_node *op2, ir_mode *mode) {
945 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
947 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
948 ir_node *op, ir_mode *mode) {
949 return new_rd_Not(NULL, irg, block, op, mode);
951 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
952 ir_node *op1, ir_node *op2) {
953 return new_rd_Cmp(NULL, irg, block, op1, op2);
955 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
956 ir_node *op, ir_node *k, ir_mode *mode) {
957 return new_rd_Shl(NULL, irg, block, op, k, mode);
959 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
960 ir_node *op, ir_node *k, ir_mode *mode) {
961 return new_rd_Shr(NULL, irg, block, op, k, mode);
963 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
964 ir_node *op, ir_node *k, ir_mode *mode) {
965 return new_rd_Shrs(NULL, irg, block, op, k, mode);
967 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
968 ir_node *op, ir_node *k, ir_mode *mode) {
969 return new_rd_Rot(NULL, irg, block, op, k, mode);
971 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
972 ir_node *op, ir_mode *mode) {
973 return new_rd_Conv(NULL, irg, block, op, mode);
975 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
976 return new_rd_Cast(NULL, irg, block, op, to_tp);
978 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
979 ir_node **in, ir_mode *mode) {
980 return new_rd_Phi(NULL, irg, block, arity, in, mode);
982 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
983 ir_node *store, ir_node *adr) {
984 return new_rd_Load(NULL, irg, block, store, adr);
986 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
987 ir_node *store, ir_node *adr, ir_node *val) {
988 return new_rd_Store(NULL, irg, block, store, adr, val);
990 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
991 ir_node *size, type *alloc_type, where_alloc where) {
992 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
994 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
995 ir_node *ptr, ir_node *size, type *free_type) {
996 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
998 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
999 return new_rd_Sync(NULL, irg, block, arity, in);
1001 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1002 ir_mode *mode, long proj) {
1003 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1005 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1007 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1009 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1010 int arity, ir_node **in) {
1011 return new_rd_Tuple(NULL, irg, block, arity, in );
1013 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1014 ir_node *val, ir_mode *mode) {
1015 return new_rd_Id(NULL, irg, block, val, mode);
1017 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1018 return new_rd_Bad(irg);
1020 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1021 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1023 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1024 return new_rd_Unknown(irg, m);
1026 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1027 return new_rd_CallBegin(NULL, irg, block, callee);
1029 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1030 return new_rd_EndReg(NULL, irg, block);
1032 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1033 return new_rd_EndExcept(NULL, irg, block);
1035 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1036 return new_rd_Break(NULL, irg, block);
1038 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1039 ir_mode *mode, long proj) {
1040 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1042 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1043 ir_node *callee, int arity, ir_node **in,
1045 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1049 /** ********************/
1050 /** public interfaces */
1051 /** construction tools */
1055 * - create a new Start node in the current block
1057 * @return s - pointer to the created Start node
/* Constructs a Start node in current_ir_graph's current block. */
1062 new_d_Start (dbg_info* db)
1066 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1067 op_Start, mode_T, 0, NULL);
1068 /* res->attr.start.irg = current_ir_graph; */
1070 res = optimize_node(res);
1071 IRN_VRFY_IRG(res, current_ir_graph);

/* Constructs an End node in current_ir_graph's current block;
   arity -1 keeps the in-array dynamic for keep-alive edges. */
1076 new_d_End (dbg_info* db)
1079 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1080 op_End, mode_X, -1, NULL);
1081 res = optimize_node(res);
1082 IRN_VRFY_IRG(res, current_ir_graph);

1087 /* Constructs a Block with a fixed number of predecessors.
1088 Does set current_block. Can be used with automatic Phi
1089 node construction. */
1091 new_d_Block (dbg_info* db, int arity, ir_node **in)
1095 bool has_unknown = false;
1097 res = new_rd_Block(db, current_ir_graph, arity, in);
1099 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds the current value of each local (n_loc slots),
   zeroed so that get_r_value_internal can detect undefined slots. */
1100 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1101 current_ir_graph->n_loc);
1102 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Like in new_rd_Phi: presumably has_unknown is set inside the loop body
   (not visible here) to suppress optimization of blocks with Unknown
   predecessors. */
1104 for (i = arity-1; i >= 0; i--)
1105 if (get_irn_op(in[i]) == op_Unknown) {
1110 if (!has_unknown) res = optimize_node(res);
/* This constructor DOES update the implicit current block. */
1111 current_ir_graph->current_block = res;
1113 IRN_VRFY_IRG(res, current_ir_graph);
1118 /* ***********************************************************************/
1119 /* Methods necessary for automatic Phi node creation */
1121 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1122 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1123 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1124 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1126 Call Graph: ( A ---> B == A "calls" B)
1128 get_value mature_block
1136 get_r_value_internal |
1140 new_rd_Phi0 new_rd_Phi_in
1142 * *************************************************************************** */
1144 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi for immature blocks; deliberately not optimized --
   it is completed (or turned into an Id) when the block matures. */
1145 static INLINE ir_node *
1146 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1150 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1151 IRN_VRFY_IRG(res, irg);
1155 /* There are two implementations of the Phi node construction. The first
1156 is faster, but does not work for blocks with more than 2 predecessors.
1157 The second works always but is slower and causes more unnecessary Phi
1159 Select the implementations by the following preprocessor flag set in
1161 #if USE_FAST_PHI_CONSTRUCTION
1163 /* This is a stack used for allocating and deallocating nodes in
1164 new_rd_Phi_in. The original implementation used the obstack
1165 to model this stack, now it is explicit. This reduces side effects.
1167 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates the explicit free-node stack used by new_rd_Phi_in.
   NOTE(review): the malloc result does not appear to be checked in the
   visible lines -- confirm against the full source. */
1168 INLINE Phi_in_stack *
1169 new_Phi_in_stack(void) {
1172 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1174 res->stack = NEW_ARR_F (ir_node *, 1);

/* Releases the flexible array inside a Phi_in_stack. */
1181 free_Phi_in_stack(Phi_in_stack *s) {
1182 DEL_ARR_F(s->stack);

/* Pushes a discarded Phi node onto the free stack for later reuse,
   growing the array only when the stack is full. */
1186 free_to_Phi_in_stack(ir_node *phi) {
1187 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1188 current_ir_graph->Phi_in_stack->pos)
1189 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1191 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1193 (current_ir_graph->Phi_in_stack->pos)++;

/* Pops a recycled Phi node from the stack if available, otherwise
   allocates a fresh one.
   NOTE(review): line 1206 passes 'db', which is not among this
   function's visible parameters -- verify against the full source. */
1196 static INLINE ir_node *
1197 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1198 int arity, ir_node **in) {
1200 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1201 int pos = current_ir_graph->Phi_in_stack->pos;
1205 /* We need to allocate a new node */
1206 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1207 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1209 /* reuse the old node and initialize it again. */
1212 assert (res->kind == k_ir_node);
1213 assert (res->op == op_Phi);
1217 assert (arity >= 0);
1218 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] of an ir_node is the block; operands start at index 1. */
1219 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1221 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1223 (current_ir_graph->Phi_in_stack->pos)--;
1227 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1229 /* Creates a Phi node with a given, fixed array **in of predecessors.
1230 If the Phi node is unnecessary, as the same value reaches the block
1231 through all control flow paths, it is eliminated and the value
1232 returned directly. This constructor is only intended for use in
1233 the automatic Phi node generation triggered by get_value or mature.
1234 The implementation is quite tricky and depends on the fact, that
1235 the nodes are allocated on a stack:
1236 The in array contains predecessors and NULLs. The NULLs appear,
1237 if get_r_value_internal, that computed the predecessors, reached
1238 the same block on two paths. In this case the same value reaches
1239 this block on both paths, there is no definition in between. We need
1240 not allocate a Phi where these path's merge, but we have to communicate
1241 this fact to the caller. This happens by returning a pointer to the
1242 node the caller _will_ allocate. (Yes, we predict the address. We can
1243 do so because the nodes are allocated on the obstack.) The caller then
1244 finds a pointer to itself and, when this routine is called again,
/* Builds a Phi from a possibly NULL-containing in-array; may return an
   existing value instead of a Phi, or (by design) a pointer to a node
   the caller will allocate next on the obstack -- see the long comment
   above this function. */
1247 static INLINE ir_node *
1248 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1251 ir_node *res, *known;
1253 /* Allocate a new node on the obstack. This can return a node to
1254 which some of the pointers in the in-array already point.
1255 Attention: the constructor copies the in array, i.e., the later
1256 changes to the array in this routine do not affect the
1257 constructed node! If the in array contains NULLs, there will be
1258 missing predecessors in the returned node. Is this a possible
1259 internal state of the Phi node generation? */
1260 #if USE_EXPLICIT_PHI_IN_STACK
1261 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1263 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1264 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1267 /* The in-array can contain NULLs. These were returned by
1268 get_r_value_internal if it reached the same block/definition on a
1269 second path. The NULLs are replaced by the node itself to
1270 simplify the test in the next loop. */
1271 for (i = 0; i < ins; ++i) {
1276 /* This loop checks whether the Phi has more than one predecessor.
1277 If so, it is a real Phi node and we break the loop. Else the Phi
1278 node merges the same definition on several paths and therefore is
/* Operands equal to the node itself (former NULLs) or to the single
   known value do not count as distinct predecessors. */
1280 for (i = 0; i < ins; ++i)
1282 if (in[i] == res || in[i] == known) continue;
1290 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The unneeded Phi is recycled: returned to the explicit stack, or
   freed from the obstack (it is the topmost allocation). */
1292 #if USE_EXPLICIT_PHI_IN_STACK
1293 free_to_Phi_in_stack(res);
1295 obstack_free (current_ir_graph->obst, res);
1299 res = optimize_node (res);
1300 IRN_VRFY_IRG(res, irg);
1303 /* return the pointer to the Phi node. This node might be deallocated! */
1308 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1311 allocates and returns this node. The routine called to allocate the
1312 node might optimize it away and return a real value, or even a pointer
1313 to a deallocated Phi node on top of the obstack!
1314 This function is called with an in-array of proper size. **/
/* Collects the value of local 'pos' (mode 'mode') from all CFG
   predecessors of 'block' into nin and builds a Phi from them.
   May return a pointer to a node not yet allocated -- see comments. */
1316 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1318 ir_node *prevBlock, *res;
1321 /* This loop goes to all predecessor blocks of the block the Phi node is in
1322 and there finds the operands of the Phi node by calling
1323 get_r_value_internal. */
/* block->in[0] is the block's own slot; CFG predecessors start at 1. */
1324 for (i = 1; i <= ins; ++i) {
1325 assert (block->in[i]);
1326 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1328 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1331 /* After collecting all predecessors into the array nin a new Phi node
1332 with these predecessors is created. This constructor contains an
1333 optimization: If all predecessors of the Phi node are identical it
1334 returns the only operand instead of a new Phi node. If the value
1335 passes two different control flow edges without being defined, and
1336 this is the second path treated, a pointer to the node that will be
1337 allocated for the first path (recursion) is returned. We already
1338 know the address of this node, as it is the next node to be allocated
1339 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1340 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1342 /* Now we now the value for "pos" and can enter it in the array with
1343 all known local variables. Attention: this might be a pointer to
1344 a node, that later will be allocated!!! See new_rd_Phi_in.
1345 If this is called in mature, after some set_value in the same block,
1346 the proper value must not be overwritten:
1348 get_value (makes Phi0, put's it into graph_arr)
1349 set_value (overwrites Phi0 in graph_arr)
1350 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the result if the slot is still empty -- a set_value
   in the meantime must win over the merged Phi. */
1353 if (!block->attr.block.graph_arr[pos]) {
1354 block->attr.block.graph_arr[pos] = res;
1356 /* printf(" value already computed by %s\n",
1357 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1363 /* This function returns the last definition of a variable. In case
1364 this variable was last defined in a previous block, Phi nodes are
1365 inserted. If the part of the firm graph containing the definition
1366 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): slow (visited-flag based) variant of get_r_value_internal;
   a second variant appears later in the file. This listing is fragmentary --
   return type, braces and several lines are elided in this chunk. */
1368 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1371 /* There are 4 cases to treat.
1373 1. The block is not mature and we visit it the first time. We can not
1374 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1375 predecessors is returned. This node is added to the linked list (field
1376 "link") of the containing block to be completed when this block is
1377 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1380 2. The value is already known in this block, graph_arr[pos] is set and we
1381 visit the block the first time. We can return the value without
1382 creating any new nodes.
1384 3. The block is mature and we visit it the first time. A Phi node needs
1385 to be created (phi_merge). If the Phi is not needed, as all its
1386 operands are the same value reaching the block through different
1387 paths, it's optimized away and the value itself is returned.
1389 4. The block is mature, and we visit it the second time. Now two
1390 subcases are possible:
1391 * The value was computed completely the last time we were here. This
1392 is the case if there is no loop. We can return the proper value.
1393 * The recursion that visited this node and set the flag did not
1394 return yet. We are computing a value in a loop and need to
1395 break the recursion without knowing the result yet.
1396 @@@ strange case. Straight forward we would create a Phi before
1397 starting the computation of its predecessors. In this case we will
1398 find a Phi here in any case. The problem is that this implementation
1399 only creates a Phi after computing the predecessors, so that it is
1400 hard to compute self references of this Phi. @@@
1401 There is no simple check for the second subcase. Therefore we check
1402 for a second visit and treat all such cases as the second subcase.
1403 Anyways, the basic situation is the same: we reached a block
1404 on two paths without finding a definition of the value: No Phi
1405 nodes are needed on both paths.
1406 We return this information "Two paths, no Phi needed" by a very tricky
1407 implementation that relies on the fact that an obstack is a stack and
1408 will return a node with the same address on different allocations.
1409 Look also at phi_merge and new_rd_phi_in to understand this.
1410 @@@ Unfortunately this does not work, see testprogram
1411 three_cfpred_example.
1415 /* case 4 -- already visited. */
1416 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1418 /* visited the first time */
1419 set_irn_visited(block, get_irg_visited(current_ir_graph));
1421 /* Get the local valid value */
1422 res = block->attr.block.graph_arr[pos];
1424 /* case 2 -- If the value is actually computed, return it. */
1425 if (res) return res;
1427 if (block->attr.block.matured) { /* case 3 */
1429 /* The Phi has the same amount of ins as the corresponding block. */
1430 int ins = get_irn_arity(block);
1432 NEW_ARR_A (ir_node *, nin, ins);
1434 /* Phi merge collects the predecessors and then creates a node. */
1435 res = phi_merge (block, pos, mode, nin, ins);
1437 } else { /* case 1 */
1438 /* The block is not mature, we don't know how many in's are needed. A Phi
1439 with zero predecessors is created. Such a Phi node is called Phi0
1440 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1441 to the list of Phi0 nodes in this block to be matured by mature_block
1443 The Phi0 has to remember the pos of its internal value. If the real
1444 Phi is computed, pos is used to update the array with the local
1447 res = new_rd_Phi0 (current_ir_graph, block, mode);
1448 res->attr.phi0_pos = pos;
1449 res->link = block->link;
1453 /* If we get here, the frontend missed a use-before-definition error */
1456 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1457 assert (mode->code >= irm_F && mode->code <= irm_P);
1458 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1459 tarval_mode_null[mode->code]);
1462 /* The local valid value is available now. */
1463 block->attr.block.graph_arr[pos] = res;
1471 it starts the recursion. This causes an Id at the entry of
1472 every block that has no definition of the value! **/
1474 #if USE_EXPLICIT_PHI_IN_STACK
1476 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1477 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in (phi0-aware variant): allocate a Phi node for `block` with
   the `ins` operands in in[]; collapses to the single distinct operand when
   all (non-Bad, non-self) predecessors agree. `phi0` is the placeholder
   Phi0 this Phi will replace; self references through it are redirected.
   NOTE(review): fragmentary listing -- `{`, loop body braces and the
   single-predecessor return path are partially elided here. */
1480 static INLINE ir_node *
1481 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1482 ir_node **in, int ins, ir_node *phi0)
1485 ir_node *res, *known;
1487 /* Allocate a new node on the obstack. The allocation copies the in
1489 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1490 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1492 /* This loop checks whether the Phi has more than one predecessor.
1493 If so, it is a real Phi node and we break the loop. Else the
1494 Phi node merges the same definition on several paths and therefore
1495 is not needed. Don't consider Bad nodes! */
1497 for (i=0; i < ins; ++i)
1501 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1503 /* Optimize self referencing Phis: We can't detect them yet properly, as
1504 they still refer to the Phi0 they will replace. So replace right now. */
1505 if (phi0 && in[i] == phi0) in[i] = res;
1507 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1515 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1518 obstack_free (current_ir_graph->obst, res);
1519 if (is_Phi(known)) {
1520 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1521 order, an enclosing Phi node may get superfluous. */
1522 res = optimize_in_place_2(known);
1523 if (res != known) { exchange(known, res); }
1528 /* A undefined value, e.g., in unreachable code. */
1532 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1533 IRN_VRFY_IRG(res, irg);
1534 /* Memory Phis in endless loops must be kept alive.
1535 As we can't distinguish these easily we keep all of them alive. */
1536 if ((res->op == op_Phi) && (mode == mode_M))
1537 add_End_keepalive(irg->end, res);
1544 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1546 #if PRECISE_EXC_CONTEXT
1548 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1550 /* Construct a new frag_array for node n.
1551 Copy the content from the current graph_arr of the corresponding block:
1552 this is the current state.
1553 Set ProjM(n) as current memory state.
1554 Further the last entry in frag_arr of current block points to n. This
1555 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): fragmentary listing -- local declarations, the `else`
   keyword before the non-Call branch, and the line restoring the saved
   `opt` optimization flag appear to be elided from this chunk; confirm
   against the full source. */
1557 static INLINE ir_node ** new_frag_arr (ir_node *n)
1562 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1563 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1564 sizeof(ir_node *)*current_ir_graph->n_loc);
1566 /* turn off optimization before allocating Proj nodes, as res isn't
1568 opt = get_opt_optimize(); set_optimize(0);
1569 /* Here we rely on the fact that all frag ops have Memory as first result! */
1570 if (get_irn_op(n) == op_Call)
1571 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1573 assert((pn_Quot_M == pn_DivMod_M) &&
1574 (pn_Quot_M == pn_Div_M) &&
1575 (pn_Quot_M == pn_Mod_M) &&
1576 (pn_Quot_M == pn_Load_M) &&
1577 (pn_Quot_M == pn_Store_M) &&
1578 (pn_Quot_M == pn_Alloc_M) );
1579 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1583 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the frag_arr stored in node n; the attribute location depends on
   the opcode (Call and Alloc keep it in op-specific attribute structs,
   all other fragile ops in the generic attr.frag_arr slot). */
1587 static INLINE ir_node **
1588 get_frag_arr (ir_node *n) {
1589 if (get_irn_op(n) == op_Call) {
1590 return n->attr.call.frag_arr;
1591 } else if (get_irn_op(n) == op_Alloc) {
1592 return n->attr.a.frag_arr;
1594 return n->attr.frag_arr;
/* set_frag_value: store `val` at slot `pos` in `frag_arr` unless already
   set, then propagate along the chain of frag arrays linked through the
   last entry (n_loc - 1).
   NOTE(review): two alternative implementations (a recursive one and an
   iterative one bounded to 1000 steps) appear interleaved here; the
   original presumably selects one via a preprocessor conditional that is
   elided from this chunk -- confirm against the full source. */
1599 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1601 if (!frag_arr[pos]) frag_arr[pos] = val;
1602 if (frag_arr[current_ir_graph->n_loc - 1]) {
1603 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1604 assert(arr != frag_arr && "Endless recursion detected");
1605 set_frag_value(arr, pos, val);
1610 for (i = 0; i < 1000; ++i) {
1611 if (!frag_arr[pos]) {
1612 frag_arr[pos] = val;
1614 if (frag_arr[current_ir_graph->n_loc - 1]) {
1615 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1621 assert(0 && "potential endless recursion");
/* Return the value of slot `pos` valid after the fragile op `cfOp`,
   building a Phi/Phi0 in `block` when a later set_value shadowed it.
   NOTE(review): fragmentary listing -- return type, some braces and the
   branch structure around lines 1635/1655 are elided here. */
1626 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1630 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1632 frag_arr = get_frag_arr(cfOp);
1633 res = frag_arr[pos];
1635 if (block->attr.block.graph_arr[pos]) {
1636 /* There was a set_value after the cfOp and no get_value before that
1637 set_value. We must build a Phi node now. */
1638 if (block->attr.block.matured) {
1639 int ins = get_irn_arity(block);
1641 NEW_ARR_A (ir_node *, nin, ins);
1642 res = phi_merge(block, pos, mode, nin, ins);
1644 res = new_rd_Phi0 (current_ir_graph, block, mode);
1645 res->attr.phi0_pos = pos;
1646 res->link = block->link;
1650 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1651 but this should be better: (remove comment if this works) */
1652 /* It's a Phi, we can write this into all graph_arrs with NULL */
1653 set_frag_value(block->attr.block.graph_arr, pos, res);
1655 res = get_r_value_internal(block, pos, mode);
1656 set_frag_value(block->attr.block.graph_arr, pos, res);
1664 computes the predecessors for the real phi node, and then
1665 allocates and returns this node. The routine called to allocate the
1666 node might optimize it away and return a real value.
1667 This function must be called with an in-array of proper size. **/
/* phi_merge (Phi0-based variant): collect the reaching definitions of slot
   `pos` from all predecessors into nin[], placing a Phi0 placeholder in
   graph_arr first to break recursion, then build the real Phi via
   new_rd_Phi_in and exchange the Phi0 for it.
   NOTE(review): fragmentary listing -- return type, `{`, `#endif`s and
   several closing braces are elided in this chunk. */
1669 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1671 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1674 /* If this block has no value at pos create a Phi0 and remember it
1675 in graph_arr to break recursions.
1676 Else we may not set graph_arr as there a later value is remembered. */
1678 if (!block->attr.block.graph_arr[pos]) {
1679 if (block == get_irg_start_block(current_ir_graph)) {
1680 /* Collapsing to Bad tarvals is no good idea.
1681 So we call a user-supplied routine here that deals with this case as
1682 appropriate for the given language. Sadly the only help we can give
1683 here is the position.
1685 Even if all variables are defined before use, it can happen that
1686 we get to the start block, if a cond has been replaced by a tuple
1687 (bad, jmp). In this case we call the function needlessly, eventually
1688 generating a non-existent error.
1689 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1692 if (default_initialize_local_variable)
1693 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1695 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1696 /* We don't need to care about exception ops in the start block.
1697 There are none by definition. */
1698 return block->attr.block.graph_arr[pos];
1700 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1701 block->attr.block.graph_arr[pos] = phi0;
1702 #if PRECISE_EXC_CONTEXT
1703 if (get_opt_precise_exc_context()) {
1704 /* Set graph_arr for fragile ops. Also here we should break recursion.
1705 We could choose a cyclic path through a cfop. But the recursion would
1706 break at some point. */
1707 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1713 /* This loop goes to all predecessor blocks of the block the Phi node
1714 is in and there finds the operands of the Phi node by calling
1715 get_r_value_internal. */
1716 for (i = 1; i <= ins; ++i) {
1717 prevCfOp = skip_Proj(block->in[i]);
1719 if (is_Bad(prevCfOp)) {
1720 /* In case a Cond has been optimized we would get right to the start block
1721 with an invalid definition. */
1722 nin[i-1] = new_Bad();
1725 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1727 if (!is_Bad(prevBlock)) {
1728 #if PRECISE_EXC_CONTEXT
1729 if (get_opt_precise_exc_context() &&
1730 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): get_r_frag_value_internal is invoked twice here (once
   inside the assert, once for the assignment); assumed safe to repeat
   since the second call returns the cached value -- confirm. */
1731 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1732 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1735 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1737 nin[i-1] = new_Bad();
1741 /* We want to pass the Phi0 node to the constructor: this finds additional
1742 optimization possibilities.
1743 The Phi0 node either is allocated in this function, or it comes from
1744 a former call to get_r_value_internal. In this case we may not yet
1745 exchange phi0, as this is done in mature_block. */
1747 phi0_all = block->attr.block.graph_arr[pos];
1748 if (!((get_irn_op(phi0_all) == op_Phi) &&
1749 (get_irn_arity(phi0_all) == 0) &&
1750 (get_nodes_block(phi0_all) == block)))
1756 /* After collecting all predecessors into the array nin a new Phi node
1757 with these predecessors is created. This constructor contains an
1758 optimization: If all predecessors of the Phi node are identical it
1759 returns the only operand instead of a new Phi node. */
1760 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1762 /* In case we allocated a Phi0 node at the beginning of this procedure,
1763 we need to exchange this Phi0 with the real Phi. */
1765 exchange(phi0, res);
1766 block->attr.block.graph_arr[pos] = res;
1767 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1768 only an optimization. */
1774 /* This function returns the last definition of a variable. In case
1775 this variable was last defined in a previous block, Phi nodes are
1776 inserted. If the part of the firm graph containing the definition
1777 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): Phi0-based variant; differs from the earlier variant in
   case 4, which returns the Phi0 placed by phi_merge instead of NULL.
   Fragmentary listing -- return type, `{` and several lines are elided. */
1779 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1782 /* There are 4 cases to treat.
1784 1. The block is not mature and we visit it the first time. We can not
1785 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1786 predecessors is returned. This node is added to the linked list (field
1787 "link") of the containing block to be completed when this block is
1788 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1791 2. The value is already known in this block, graph_arr[pos] is set and we
1792 visit the block the first time. We can return the value without
1793 creating any new nodes.
1795 3. The block is mature and we visit it the first time. A Phi node needs
1796 to be created (phi_merge). If the Phi is not needed, as all its
1797 operands are the same value reaching the block through different
1798 paths, it's optimized away and the value itself is returned.
1800 4. The block is mature, and we visit it the second time. Now two
1801 subcases are possible:
1802 * The value was computed completely the last time we were here. This
1803 is the case if there is no loop. We can return the proper value.
1804 * The recursion that visited this node and set the flag did not
1805 return yet. We are computing a value in a loop and need to
1806 break the recursion. This case only happens if we visited
1807 the same block with phi_merge before, which inserted a Phi0.
1808 So we return the Phi0.
1811 /* case 4 -- already visited. */
1812 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1813 /* As phi_merge allocates a Phi0 this value is always defined. Here
1814 is the critical difference of the two algorithms. */
1815 assert(block->attr.block.graph_arr[pos]);
1816 return block->attr.block.graph_arr[pos];
1819 /* visited the first time */
1820 set_irn_visited(block, get_irg_visited(current_ir_graph));
1822 /* Get the local valid value */
1823 res = block->attr.block.graph_arr[pos];
1825 /* case 2 -- If the value is actually computed, return it. */
1826 if (res) { return res; };
1828 if (block->attr.block.matured) { /* case 3 */
1830 /* The Phi has the same amount of ins as the corresponding block. */
1831 int ins = get_irn_arity(block);
1833 NEW_ARR_A (ir_node *, nin, ins);
1835 /* Phi merge collects the predecessors and then creates a node. */
1836 res = phi_merge (block, pos, mode, nin, ins);
1838 } else { /* case 1 */
1839 /* The block is not mature, we don't know how many in's are needed. A Phi
1840 with zero predecessors is created. Such a Phi node is called Phi0
1841 node. The Phi0 is then added to the list of Phi0 nodes in this block
1842 to be matured by mature_block later.
1843 The Phi0 has to remember the pos of its internal value. If the real
1844 Phi is computed, pos is used to update the array with the local
1846 res = new_rd_Phi0 (current_ir_graph, block, mode);
1847 res->attr.phi0_pos = pos;
1848 res->link = block->link;
1852 /* If we get here, the frontend missed a use-before-definition error */
1855 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1856 assert (mode->code >= irm_F && mode->code <= irm_P);
1857 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1858 get_mode_null(mode));
1861 /* The local valid value is available now. */
1862 block->attr.block.graph_arr[pos] = res;
1867 #endif /* USE_FAST_PHI_CONSTRUCTION */
1869 /* ************************************************************************** */
1872 /** Finalize a Block node, when all control flows are known. */
1873 /** Acceptable parameters are only Block nodes. */
/* NOTE(review): fragmentary listing -- return type, `{`, local
   declarations, the `next = ...` advance inside the Phi0 loop and the
   closing braces are elided from this chunk. */
1874 mature_block (ir_node *block)
1881 assert (get_irn_opcode(block) == iro_Block);
1882 /* @@@ should be commented in
1883 assert (!get_Block_matured(block) && "Block already matured"); */
1885 if (!get_Block_matured(block)) {
1886 ins = ARR_LEN (block->in)-1;
1887 /* Fix block parameters */
1888 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1890 /* An array for building the Phi nodes. */
1891 NEW_ARR_A (ir_node *, nin, ins);
1893 /* Traverse a chain of Phi nodes attached to this block and mature
1895 for (n = block->link; n; n=next) {
1896 inc_irg_visited(current_ir_graph);
1898 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1901 block->attr.block.matured = 1;
1903 /* Now, as the block is a finished firm node, we can optimize it.
1904 Since other nodes have been allocated since the block was created
1905 we can not free the node on the obstack. Therefore we have to call
1907 Unfortunately the optimization does not change a lot, as all allocated
1908 nodes refer to the unoptimized node.
1909 We can call _2, as global cse has no effect on blocks. */
1910 block = optimize_in_place_2(block);
1911 IRN_VRFY_IRG(block, current_ir_graph);
/* new_d_* wrappers: forward to the corresponding new_rd_* constructor with
   current_ir_graph and its current_block; Const/Const_type use the
   start_block instead. new_d_defaultProj additionally marks the Cond as
   fragmentary and records the default projection number.
   NOTE(review): fragmentary listing -- return types, braces and trailing
   argument lines of most wrappers are elided from this chunk. */
1916 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1918 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1923 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1925 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1930 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1932 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1938 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1940 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1945 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1947 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
1952 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1955 assert(arg->op == op_Cond);
1956 arg->attr.c.kind = fragmentary;
1957 arg->attr.c.default_proj = max_proj;
1958 res = new_Proj (arg, mode_X, max_proj);
1963 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1965 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1970 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1972 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1976 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1978 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
1983 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1985 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
1990 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1992 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
1998 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2000 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2005 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2007 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2012 * allocate the frag array
/* Attach a frag array to `res` (storing it through `frag_store`) when
   precise exception context is on, the graph is still being built, the
   node kept its opcode (was not optimized away) and no array was set by
   a CSE hit. NOTE(review): closing braces elided in this chunk. */
2014 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2015 if (get_opt_precise_exc_context()) {
2016 if ((current_ir_graph->phase_state == phase_building) &&
2017 (get_irn_op(res) == op) && /* Could be optimized away. */
2018 !*frag_store) /* Could be a cse where the arr is already set. */ {
2019 *frag_store = new_frag_arr(res);
/* new_d_* wrappers for the fragile arithmetic ops (Quot, DivMod, Div,
   Mod): forward to new_rd_*, then allocate a frag array for precise
   exception context. NOTE(review): return types, braces, the memop/op
   argument lines and `#endif`s are elided from this chunk. */
2026 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2029 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2031 #if PRECISE_EXC_CONTEXT
2032 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2039 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2042 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2044 #if PRECISE_EXC_CONTEXT
2045 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2052 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2055 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2057 #if PRECISE_EXC_CONTEXT
2058 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2065 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2068 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2070 #if PRECISE_EXC_CONTEXT
2071 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
/* new_d_* wrappers for bitwise/shift/compare and control-flow ops; all
   forward to new_rd_* with the current graph and block. new_d_Call also
   allocates a frag array for precise exception context.
   NOTE(review): return types, braces and trailing argument lines are
   elided from this chunk. */
2078 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2080 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2085 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2087 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2092 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2094 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2099 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2101 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2106 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2108 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2113 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2115 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2120 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2122 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2127 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2129 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2134 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2136 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2141 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2143 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2148 new_d_Jmp (dbg_info* db)
2150 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2154 new_d_Cond (dbg_info* db, ir_node *c)
2156 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2160 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2164 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2165 store, callee, arity, in, tp);
2166 #if PRECISE_EXC_CONTEXT
2167 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
/* new_d_* wrappers for memory, selection and interprocedural-view ops; all
   forward to new_rd_* with the current graph and block (SymConst variants
   use the start block). Load/Store/Alloc allocate frag arrays for precise
   exception context. NOTE(review): return types, braces and trailing
   argument lines are elided from this chunk. */
2174 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2176 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2181 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2183 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2188 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2191 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2193 #if PRECISE_EXC_CONTEXT
2194 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2201 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2204 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2206 #if PRECISE_EXC_CONTEXT
2207 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2214 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2218 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2219 store, size, alloc_type, where);
2220 #if PRECISE_EXC_CONTEXT
2221 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2228 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2230 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2231 store, ptr, size, free_type);
2235 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2236 /* GL: objptr was called frame before. Frame was a bad choice for the name
2237 as the operand could as well be a pointer to a dynamic object. */
2239 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2240 store, objptr, 0, NULL, ent);
2244 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2246 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2247 store, objptr, n_index, index, sel);
2251 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2253 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2254 store, objptr, ent));
2258 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2260 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2265 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2267 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2272 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2274 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2282 return __new_d_Bad();
2286 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2288 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2293 new_d_Unknown (ir_mode *m)
2295 return new_rd_Unknown(current_ir_graph, m);
2299 new_d_CallBegin (dbg_info *db, ir_node *call)
2302 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2307 new_d_EndReg (dbg_info *db)
2310 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2315 new_d_EndExcept (dbg_info *db)
2318 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2323 new_d_Break (dbg_info *db)
2325 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2329 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2331 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2336 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2340 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2341 callee, arity, in, tp);
2346 /* ********************************************************************* */
2347 /* Comfortable interface with automatic Phi node construction. */
2348 /* (Uses also constructors of the ?? interface, except new_Block.) */
2349 /* ********************************************************************* */
2351 /* * Block construction **/
2352 /* immature Block without predecessors */
/* Create an immature (unmatured) block with a dynamic predecessor array,
   make it the current block, and set up its graph_arr for automatic Phi
   construction. NOTE(review): local declaration, closing braces and the
   `return res;` line are elided from this chunk. */
2353 ir_node *new_d_immBlock (dbg_info* db) {
2356 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2357 /* creates a new dynamic in-array as length of in is -1 */
2358 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2359 current_ir_graph->current_block = res;
2360 res->attr.block.matured = 0;
2361 /* res->attr.block.exc = exc_normal; */
2362 /* res->attr.block.handler_entry = 0; */
2363 res->attr.block.irg = current_ir_graph;
2364 res->attr.block.backedge = NULL;
2365 res->attr.block.in_cg = NULL;
2366 res->attr.block.cg_backedge = NULL;
2367 set_Block_block_visited(res, 0);
2369 /* Create and initialize array for Phi-node construction. */
2370 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2371 current_ir_graph->n_loc);
2372 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2374 /* Immature block may not be optimized! */
2375 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper without debug info. */
2381 new_immBlock (void) {
2382 return new_d_immBlock(NULL);
2385 /* add an edge to a jmp/control flow node */
/* Append control-flow predecessor `jmp` to the (still immature) `block`. */
2387 add_in_edge (ir_node *block, ir_node *jmp)
2389 if (block->attr.block.matured) {
2390 assert(0 && "Error: Block already matured!\n");
2393 assert(jmp != NULL);
2394 ARR_APP1(ir_node *, block->in, jmp);
2398 /* changing the current block */
/* Make `target` the block new nodes are appended to. */
2400 switch_block (ir_node *target)
2402 current_ir_graph->current_block = target;
2405 /* ************************ */
2406 /* parameter administration */
2408 /* get a value from the parameter array from the current block by its index */
/* pos is 0-based for the caller; slot 0 of graph_arr is reserved for the
   memory state, hence the internal `pos + 1`. */
2410 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2412 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2413 inc_irg_visited(current_ir_graph);
2415 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2417 /* get a value from the parameter array from the current block by its index */
2419 get_value (int pos, ir_mode *mode)
2421 return get_d_value(NULL, pos, mode);
2424 /* set a value at position pos in the parameter array from the current block */
/* Slot 0 is the memory state, so user values live at pos + 1. */
2426 set_value (int pos, ir_node *value)
2428 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2429 assert(pos+1 < current_ir_graph->n_loc);
2430 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2433 /* get the current store */
/* NOTE(review): the function signature line (get_store) is elided from
   this chunk; the body reads slot 0 (the memory state) of the current
   block in mode_M. */
2437 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2438 /* GL: one could call get_value instead */
2439 inc_irg_visited(current_ir_graph);
2440 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2443 /* set the current store */
2445 set_store (ir_node *store)
2447 /* GL: one could call set_value instead */
2448 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2449 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2453 keep_alive (ir_node *ka)
2455 add_End_keepalive(current_ir_graph->end, ka);
2458 /** Useful access routines **/
2459 /* Returns the current block of the current graph. To set the current
2460 block use switch_block(). */
2461 ir_node *get_cur_block() {
2462 return get_irg_current_block(current_ir_graph);
2465 /* Returns the frame type of the current graph */
2466 type *get_cur_frame_type() {
2467 return get_irg_frame_type(current_ir_graph);
2471 /* ********************************************************************* */
2474 /* call once for each run of the library */
2476 init_cons (default_initialize_local_variable_func_t *func)
2478 default_initialize_local_variable = func;
2481 /* call for each graph */
2483 finalize_cons (ir_graph *irg) {
2484 irg->phase_state = phase_high;
2488 ir_node *new_Block(int arity, ir_node **in) {
2489 return new_d_Block(NULL, arity, in);
2491 ir_node *new_Start (void) {
2492 return new_d_Start(NULL);
2494 ir_node *new_End (void) {
2495 return new_d_End(NULL);
2497 ir_node *new_Jmp (void) {
2498 return new_d_Jmp(NULL);
2500 ir_node *new_Cond (ir_node *c) {
2501 return new_d_Cond(NULL, c);
2503 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2504 return new_d_Return(NULL, store, arity, in);
2506 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2507 return new_d_Raise(NULL, store, obj);
2509 ir_node *new_Const (ir_mode *mode, tarval *con) {
2510 return new_d_Const(NULL, mode, con);
2512 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2513 return new_d_SymConst(NULL, value, kind);
2515 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2516 return new_d_simpleSel(NULL, store, objptr, ent);
2518 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2520 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2522 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2523 return new_d_InstOf (NULL, store, objptr, ent);
2525 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2527 return new_d_Call(NULL, store, callee, arity, in, tp);
2529 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2530 return new_d_Add(NULL, op1, op2, mode);
2532 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2533 return new_d_Sub(NULL, op1, op2, mode);
2535 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2536 return new_d_Minus(NULL, op, mode);
2538 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2539 return new_d_Mul(NULL, op1, op2, mode);
2541 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2542 return new_d_Quot(NULL, memop, op1, op2);
2544 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2545 return new_d_DivMod(NULL, memop, op1, op2);
2547 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2548 return new_d_Div(NULL, memop, op1, op2);
2550 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2551 return new_d_Mod(NULL, memop, op1, op2);
2553 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2554 return new_d_Abs(NULL, op, mode);
2556 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2557 return new_d_And(NULL, op1, op2, mode);
2559 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2560 return new_d_Or(NULL, op1, op2, mode);
2562 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2563 return new_d_Eor(NULL, op1, op2, mode);
2565 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2566 return new_d_Not(NULL, op, mode);
2568 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2569 return new_d_Shl(NULL, op, k, mode);
2571 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2572 return new_d_Shr(NULL, op, k, mode);
2574 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2575 return new_d_Shrs(NULL, op, k, mode);
2577 #define new_Rotate new_Rot
2578 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2579 return new_d_Rot(NULL, op, k, mode);
2581 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2582 return new_d_Cmp(NULL, op1, op2);
2584 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2585 return new_d_Conv(NULL, op, mode);
2587 ir_node *new_Cast (ir_node *op, type *to_tp) {
2588 return new_d_Cast(NULL, op, to_tp);
2590 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2591 return new_d_Phi(NULL, arity, in, mode);
2593 ir_node *new_Load (ir_node *store, ir_node *addr) {
2594 return new_d_Load(NULL, store, addr);
2596 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2597 return new_d_Store(NULL, store, addr, val);
2599 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2600 where_alloc where) {
2601 return new_d_Alloc(NULL, store, size, alloc_type, where);
2603 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2605 return new_d_Free(NULL, store, ptr, size, free_type);
2607 ir_node *new_Sync (int arity, ir_node **in) {
2608 return new_d_Sync(NULL, arity, in);
2610 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2611 return new_d_Proj(NULL, arg, mode, proj);
2613 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2614 return new_d_defaultProj(NULL, arg, max_proj);
2616 ir_node *new_Tuple (int arity, ir_node **in) {
2617 return new_d_Tuple(NULL, arity, in);
2619 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2620 return new_d_Id(NULL, val, mode);
2622 ir_node *new_Bad (void) {
2625 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2626 return new_d_Confirm (NULL, val, bound, cmp);
2628 ir_node *new_Unknown(ir_mode *m) {
2629 return new_d_Unknown(m);
2631 ir_node *new_CallBegin (ir_node *callee) {
2632 return new_d_CallBegin(NULL, callee);
2634 ir_node *new_EndReg (void) {
2635 return new_d_EndReg(NULL);
2637 ir_node *new_EndExcept (void) {
2638 return new_d_EndExcept(NULL);
2640 ir_node *new_Break (void) {
2641 return new_d_Break(NULL);
2643 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2644 return new_d_Filter(NULL, arg, mode, proj);
2646 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2647 return new_d_FuncCall(NULL, callee, arity, in, tp);