3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
/* Callback the front end may install to supply a value for a local
   variable that is read before any definition is seen. */
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
/* Raw constructor: builds a mature Block with a fixed number of control
   flow predecessors.  Does not set current_block, so it cannot be used
   with the automatic Phi construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Blocks have no enclosing block; the block argument is NULL. */
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are already known: mark the block mature. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 /* res->attr.block.exc = exc_normal; */
64 /* res->attr.block.handler_entry = 0; */
65 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph obstack. */
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call graph) predecessors are not set up here. */
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
/* Constructs the Start node of a graph: a mode_T node with no
   predecessors whose Projs deliver initial memory, frame, arguments. */
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 /* res->attr.start.irg = irg; */
82 irn_vrfy_irg (res, irg);
/* Constructs the End node.  Arity -1: predecessors (keep-alives and
   control flow from Return/Raise) are added dynamically later. */
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
/* Constructs a Phi node in <block> with <arity> data predecessors of
   mode <mode>.  Intended for mature blocks, where block arity and Phi
   arity must agree. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 /* Don't assert that block matured: the use of this constructor is strongly
108 if ( get_Block_matured(block) )
109 assert( get_irn_arity(block) == arity );
111 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
/* One backedge flag per predecessor, on the graph obstack. */
113 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan for Unknown predecessors; a Phi with Unknown inputs must not be
   handed to the optimizer (it could be folded prematurely). */
115 for (i = arity-1; i >= 0; i--)
116 if (get_irn_op(in[i]) == op_Unknown) {
121 if (!has_unknown) res = optimize_node (res);
122 irn_vrfy_irg (res, irg);
124 /* Memory Phis in endless loops must be kept alive.
125 As we can't distinguish these easily we keep all of them alive. */
126 if ((res->op == op_Phi) && (mode == mode_M))
127 add_End_keepalive(irg->end, res);
/* Constructs a Const node for tarval <con> with an explicit type <tp>.
   Note: the <block> parameter is ignored here — Const nodes are always
   placed in the start block of the graph. */
132 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
135 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
136 res->attr.con.tv = con;
137 set_Const_type(res, tp); /* Call method because of complex assertion. */
138 res = optimize_node (res);
/* optimize_node may CSE to an existing Const; its type must match. */
139 assert(get_Const_type(res) == tp);
140 irn_vrfy_irg (res, irg);
/* Convenience wrapper: derives the type from the tarval (pointer type
   for entity tarvals, unknown_type otherwise) and delegates. */
146 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
148 type *tp = unknown_type;
149 /* removing this somehow causes errors in jack. */
150 if (tarval_is_entity(con))
151 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
153 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node: forwards <val> unchanged; normally optimized
   away immediately by optimize_node. */
157 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
162 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
163 res = optimize_node (res);
164 irn_vrfy_irg (res, irg);
/* Constructs a Proj node selecting result number <proj> of the mode_T
   node <arg>. */
169 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
175 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
176 res->attr.proj = proj;
179 assert(get_Proj_pred(res));
180 assert(get_nodes_Block(get_Proj_pred(res)));
182 res = optimize_node (res);
184 irn_vrfy_irg (res, irg);
/* Constructs the default Proj of a Cond node (the "otherwise" branch)
   and marks the Cond as fragmentary with that default. */
190 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
194 assert(arg->op == op_Cond);
195 arg->attr.c.kind = fragmentary;
196 arg->attr.c.default_proj = max_proj;
197 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts <op> to mode <mode>. */
202 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
207 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
208 res = optimize_node (res);
209 irn_vrfy_irg (res, irg);
/* Constructs a Cast node: reinterprets <op> as type <to_tp>; the mode
   stays that of the operand. */
214 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
217 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
218 res->attr.cast.totype = to_tp;
219 res = optimize_node (res);
220 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node grouping <arity> values into one mode_T
   result, accessed via Proj. */
225 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
229 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
230 res = optimize_node (res);
231 irn_vrfy_irg (res, irg);
/* -------------------------------------------------------------------
   Arithmetic, logic, shift and comparison constructors.  All follow
   the same pattern: pack operands into an in-array, allocate the node,
   run local optimization (constant folding / CSE), then verify.
   The memop variants (Quot, DivMod, Div, Mod) additionally take the
   memory state and produce a mode_T result (memory + value + exception
   Projs); Cmp also yields mode_T (one Proj per relation).
   ------------------------------------------------------------------- */
236 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
237 ir_node *op1, ir_node *op2, ir_mode *mode)
243 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
244 res = optimize_node (res);
245 irn_vrfy_irg (res, irg);
250 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
251 ir_node *op1, ir_node *op2, ir_mode *mode)
257 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
258 res = optimize_node (res);
259 irn_vrfy_irg (res, irg);
264 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
265 ir_node *op, ir_mode *mode)
270 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
271 res = optimize_node (res);
272 irn_vrfy_irg (res, irg);
277 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
278 ir_node *op1, ir_node *op2, ir_mode *mode)
284 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
285 res = optimize_node (res);
286 irn_vrfy_irg (res, irg);
/* Division-family nodes: 3 inputs (memory, op1, op2), mode_T result. */
291 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
292 ir_node *memop, ir_node *op1, ir_node *op2)
299 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
300 res = optimize_node (res);
301 irn_vrfy_irg (res, irg);
306 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
307 ir_node *memop, ir_node *op1, ir_node *op2)
314 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
315 res = optimize_node (res);
316 irn_vrfy_irg (res, irg);
321 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
322 ir_node *memop, ir_node *op1, ir_node *op2)
329 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
330 res = optimize_node (res);
331 irn_vrfy_irg (res, irg);
336 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
337 ir_node *memop, ir_node *op1, ir_node *op2)
344 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
345 res = optimize_node (res);
346 irn_vrfy_irg (res, irg);
351 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
352 ir_node *op1, ir_node *op2, ir_mode *mode)
358 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
359 res = optimize_node (res);
360 irn_vrfy_irg (res, irg);
365 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
366 ir_node *op1, ir_node *op2, ir_mode *mode)
372 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
373 res = optimize_node (res);
374 irn_vrfy_irg (res, irg);
379 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
387 res = optimize_node (res);
388 irn_vrfy_irg (res, irg);
393 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op, ir_mode *mode)
399 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
400 res = optimize_node (res);
401 irn_vrfy_irg (res, irg);
/* Shift/rotate nodes: value <op> and shift amount <k>. */
406 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
407 ir_node *op, ir_node *k, ir_mode *mode)
413 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
414 res = optimize_node (res);
415 irn_vrfy_irg (res, irg);
420 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
421 ir_node *op, ir_node *k, ir_mode *mode)
427 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
428 res = optimize_node (res);
429 irn_vrfy_irg (res, irg);
434 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
441 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
442 res = optimize_node (res);
443 irn_vrfy_irg (res, irg);
448 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
449 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
456 res = optimize_node (res);
457 irn_vrfy_irg (res, irg);
462 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_mode *mode)
468 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
469 res = optimize_node (res);
470 irn_vrfy_irg (res, irg);
/* Cmp yields mode_T; the individual relations are selected by Proj. */
475 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
476 ir_node *op1, ir_node *op2)
482 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
483 res = optimize_node (res);
484 irn_vrfy_irg (res, irg);
/* Constructs a Jmp node: unconditional control flow out of <block>. */
489 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
492 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
493 res = optimize_node (res);
494 irn_vrfy_irg (res, irg);
/* Constructs a Cond node on selector <c>; successors are selected by
   Projs on the mode_T result.  Starts as a dense jump table with
   default projection 0. */
499 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
504 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
505 res->attr.c.kind = dense;
506 res->attr.c.default_proj = 0;
507 res = optimize_node (res);
508 irn_vrfy_irg (res, irg);
/* Constructs a Call node.  Builds an in-array of memory state, callee
   address and the <arity> actual parameters (hence r_arity = arity+2,
   copied into r_in[2..]).  <tp> is the method type of the callee. */
513 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
514 ir_node *callee, int arity, ir_node **in, type *tp)
521 NEW_ARR_A (ir_node *, r_in, r_arity);
524 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
526 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
528 assert(is_method_type(tp));
529 set_Call_type(res, tp);
/* Possible callees (for interprocedural analysis) not yet computed. */
530 res->attr.call.callee_arr = NULL;
531 res = optimize_node (res);
532 irn_vrfy_irg (res, irg);
/* Constructs a Return node: memory state plus <arity> result values
   (r_arity = arity+1, results copied into r_in[1..]). */
537 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
538 ir_node *store, int arity, ir_node **in)
545 NEW_ARR_A (ir_node *, r_in, r_arity);
547 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
548 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
549 res = optimize_node (res);
550 irn_vrfy_irg (res, irg);
/* Constructs a Raise node: raises exception object <obj> with the
   given memory state. */
555 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
561 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
562 res = optimize_node (res);
563 irn_vrfy_irg (res, irg);
/* Constructs a Load node: inputs memory state and address; mode_T
   result (memory, loaded value, exception control flow via Projs). */
568 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
569 ir_node *store, ir_node *adr)
575 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
577 res = optimize_node (res);
578 irn_vrfy_irg (res, irg);
/* Constructs a Store node: inputs memory state, address and value. */
583 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
584 ir_node *store, ir_node *adr, ir_node *val)
591 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
593 res = optimize_node (res);
595 irn_vrfy_irg (res, irg);
/* Constructs an Alloc node: allocates <size> bytes of type
   <alloc_type>; <where> selects stack or heap allocation. */
600 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
601 ir_node *size, type *alloc_type, where_alloc where)
607 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
609 res->attr.a.where = where;
610 res->attr.a.type = alloc_type;
612 res = optimize_node (res);
613 irn_vrfy_irg (res, irg);
/* Constructs a Free node: releases <size> bytes at <ptr> of type
   <free_type>. */
618 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *ptr, ir_node *size, type *free_type)
626 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
628 res->attr.f = free_type;
630 res = optimize_node (res);
631 irn_vrfy_irg (res, irg);
/* Constructs a Sel node: computes the address of entity <ent> relative
   to <objptr>, with <arity> index expressions for array access.
   r_arity = arity+2 (memory state + object pointer + indices). */
636 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
637 int arity, ir_node **in, entity *ent)
643 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
646 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
649 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
650 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
652 res->attr.s.ent = ent;
654 res = optimize_node (res);
655 irn_vrfy_irg (res, irg);
/* Constructs an InstOf node: runtime type test of <objptr> against
   type <ent>. */
660 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
661 ir_node *objptr, type *ent)
668 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): op_Sel is used here although this constructs an InstOf
   and fills attr.io — looks like it should be op_InstOf; confirm
   against irop declarations before changing. */
672 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
674 res->attr.io.ent = ent;
676 /* res = optimize (res);
677 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst node: a symbolic constant that is either a
   linkage pointer (an ident) or a type tag / type size (a type),
   discriminated by <symkind>. */
682 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
683 symconst_kind symkind)
/* The result mode depends on the kind (pointer vs. integer). */
687 if (symkind == linkage_ptr_info)
691 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
693 res->attr.i.num = symkind;
694 if (symkind == linkage_ptr_info) {
695 res->attr.i.tori.ptrinfo = (ident *)value;
697 assert ( ( (symkind == type_tag)
698 || (symkind == size))
699 && (is_type(value)));
700 res->attr.i.tori.typ = (type *)value;
702 res = optimize_node (res);
703 irn_vrfy_irg (res, irg);
/* Constructs a Sync node: merges <arity> memory states into one. */
708 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
712 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
714 res = optimize_node (res);
715 irn_vrfy_irg (res, irg);
/* Returns the graph's unique Bad node (body elided in this listing). */
720 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm node: asserts that <val> stands in relation
   <cmp> to <bound>; the value itself is passed through unchanged. */
726 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
728 ir_node *in[2], *res;
732 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
734 res->attr.confirm_cmp = cmp;
736 res = optimize_node (res);
737 irn_vrfy_irg(res, irg);
/* Constructs an Unknown node of mode <m>, pinned to the start block.
   No debug info, no optimization: the node is a placeholder. */
742 new_rd_Unknown (ir_graph *irg, ir_mode *m)
744 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin node (interprocedural view): marks the begin
   of a call, keeping the callee address of <call>. */
748 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
752 in[0] = get_Call_ptr(call);
753 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
754 /* res->attr.callbegin.irg = irg; */
755 res->attr.callbegin.call = call;
756 res = optimize_node (res);
757 irn_vrfy_irg (res, irg);
/* Constructs an EndReg node (interprocedural view): end of regular
   procedure execution.  Arity -1: predecessors added dynamically. */
762 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
766 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
769 irn_vrfy_irg (res, irg);
/* Constructs an EndExcept node: end of exceptional execution; also
   registered in the graph. */
774 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
778 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
779 irg->end_except = res;
781 irn_vrfy_irg (res, irg);
/* Constructs a Break node: control flow out of an interprocedural
   block back to the caller view. */
786 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
789 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
790 res = optimize_node (res);
791 irn_vrfy_irg (res, irg);
/* Constructs a Filter node: interprocedural counterpart of Proj,
   selecting result <proj> of <arg>. */
796 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
802 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
803 res->attr.filter.proj = proj;
804 res->attr.filter.in_cg = NULL;
805 res->attr.filter.backedge = NULL;
808 assert(get_Proj_pred(res));
809 assert(get_nodes_Block(get_Proj_pred(res)));
811 res = optimize_node (res);
813 irn_vrfy_irg (res, irg);
/* Constructs a FuncCall node: a call without memory side effects, so
   it takes no memory state.  r_arity = arity+1 (callee + parameters,
   copied into r_in[1..]); <tp> is the callee's method type. */
819 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
820 ir_node *callee, int arity, ir_node **in, type *tp)
827 NEW_ARR_A (ir_node *, r_in, r_arity);
829 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
831 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
833 assert(is_method_type(tp));
834 set_FuncCall_type(res, tp);
835 res->attr.call.callee_arr = NULL;
836 res = optimize_node (res);
837 irn_vrfy_irg (res, irg);
/* -------------------------------------------------------------------
   new_r_* wrappers: the "raw" constructor interface without debug
   information.  Each simply forwards to the corresponding new_rd_*
   constructor with a NULL dbg_info.
   ------------------------------------------------------------------- */
842 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
843 return new_rd_Block(NULL, irg, arity, in);
845 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
846 return new_rd_Start(NULL, irg, block);
848 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
849 return new_rd_End(NULL, irg, block);
851 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
852 return new_rd_Jmp(NULL, irg, block);
854 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
855 return new_rd_Cond(NULL, irg, block, c);
857 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
858 ir_node *store, int arity, ir_node **in) {
859 return new_rd_Return(NULL, irg, block, store, arity, in);
861 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
862 ir_node *store, ir_node *obj) {
863 return new_rd_Raise(NULL, irg, block, store, obj);
865 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
866 ir_mode *mode, tarval *con) {
867 return new_rd_Const(NULL, irg, block, mode, con);
869 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
870 type_or_id_p value, symconst_kind symkind) {
871 return new_rd_SymConst(NULL, irg, block, value, symkind);
873 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
874 ir_node *objptr, int n_index, ir_node **index,
876 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
878 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
880 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
882 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
883 ir_node *callee, int arity, ir_node **in,
885 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
887 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
888 ir_node *op1, ir_node *op2, ir_mode *mode) {
889 return new_rd_Add(NULL, irg, block, op1, op2, mode);
891 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
892 ir_node *op1, ir_node *op2, ir_mode *mode) {
893 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
895 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
896 ir_node *op, ir_mode *mode) {
897 return new_rd_Minus(NULL, irg, block, op, mode);
899 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
900 ir_node *op1, ir_node *op2, ir_mode *mode) {
901 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
903 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
904 ir_node *memop, ir_node *op1, ir_node *op2) {
905 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
907 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
908 ir_node *memop, ir_node *op1, ir_node *op2) {
909 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
911 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
912 ir_node *memop, ir_node *op1, ir_node *op2) {
913 return new_rd_Div(NULL, irg, block, memop, op1, op2);
915 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
916 ir_node *memop, ir_node *op1, ir_node *op2) {
917 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
919 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
920 ir_node *op, ir_mode *mode) {
921 return new_rd_Abs(NULL, irg, block, op, mode);
923 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
924 ir_node *op1, ir_node *op2, ir_mode *mode) {
925 return new_rd_And(NULL, irg, block, op1, op2, mode);
927 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
928 ir_node *op1, ir_node *op2, ir_mode *mode) {
929 return new_rd_Or(NULL, irg, block, op1, op2, mode);
931 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
932 ir_node *op1, ir_node *op2, ir_mode *mode) {
933 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
935 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
936 ir_node *op, ir_mode *mode) {
937 return new_rd_Not(NULL, irg, block, op, mode);
939 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
940 ir_node *op1, ir_node *op2) {
941 return new_rd_Cmp(NULL, irg, block, op1, op2);
943 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
944 ir_node *op, ir_node *k, ir_mode *mode) {
945 return new_rd_Shl(NULL, irg, block, op, k, mode);
947 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
948 ir_node *op, ir_node *k, ir_mode *mode) {
949 return new_rd_Shr(NULL, irg, block, op, k, mode);
951 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
952 ir_node *op, ir_node *k, ir_mode *mode) {
953 return new_rd_Shrs(NULL, irg, block, op, k, mode);
955 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
956 ir_node *op, ir_node *k, ir_mode *mode) {
957 return new_rd_Rot(NULL, irg, block, op, k, mode);
959 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
960 ir_node *op, ir_mode *mode) {
961 return new_rd_Conv(NULL, irg, block, op, mode);
963 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
964 return new_rd_Cast(NULL, irg, block, op, to_tp);
966 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
967 ir_node **in, ir_mode *mode) {
968 return new_rd_Phi(NULL, irg, block, arity, in, mode);
970 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
971 ir_node *store, ir_node *adr) {
972 return new_rd_Load(NULL, irg, block, store, adr);
974 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
975 ir_node *store, ir_node *adr, ir_node *val) {
976 return new_rd_Store(NULL, irg, block, store, adr, val);
978 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
979 ir_node *size, type *alloc_type, where_alloc where) {
980 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
982 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
983 ir_node *ptr, ir_node *size, type *free_type) {
984 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
986 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
987 return new_rd_Sync(NULL, irg, block, arity, in);
989 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
990 ir_mode *mode, long proj) {
991 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
993 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
995 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
997 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
998 int arity, ir_node **in) {
999 return new_rd_Tuple(NULL, irg, block, arity, in );
1001 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1002 ir_node *val, ir_mode *mode) {
1003 return new_rd_Id(NULL, irg, block, val, mode);
1005 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1006 return new_rd_Bad(irg);
1008 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1009 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1011 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1012 return new_rd_Unknown(irg, m);
1014 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1015 return new_rd_CallBegin(NULL, irg, block, callee);
1017 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1018 return new_rd_EndReg(NULL, irg, block);
1020 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1021 return new_rd_EndExcept(NULL, irg, block);
1023 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1024 return new_rd_Break(NULL, irg, block);
1026 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1027 ir_mode *mode, long proj) {
1028 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1030 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1031 ir_node *callee, int arity, ir_node **in,
1033 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1037 /** ********************/
1038 /** public interfaces */
1039 /** construction tools */
1043 * - create a new Start node in the current block
1045 * @return s - pointer to the created Start node
/* Constructs a Start node in the current block of current_ir_graph
   (the "d" interface: debug info plus implicit graph/block). */
1050 new_d_Start (dbg_info* db)
1054 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1055 op_Start, mode_T, 0, NULL);
1056 /* res->attr.start.irg = current_ir_graph; */
1058 res = optimize_node (res);
1059 irn_vrfy_irg (res, current_ir_graph);
/* Constructs an End node in the current block; arity -1 so that
   keep-alive edges can be appended later. */
1064 new_d_End (dbg_info* db)
1067 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1068 op_End, mode_X, -1, NULL);
1069 res = optimize_node (res);
1070 irn_vrfy_irg (res, current_ir_graph);
1075 /* Constructs a Block with a fixed number of predecessors.
1076 Does set current_block. Can be used with automatic Phi
1077 node construction. */
/* Constructs a Block with a fixed number of predecessors, sets it as
   current_block and prepares it for automatic Phi construction by
   allocating the per-block value array (graph_arr). */
1079 new_d_Block (dbg_info* db, int arity, ir_node **in)
1083 bool has_unknown = false;
1085 res = new_rd_Block (db, current_ir_graph, arity, in);
1087 /* Create and initialize array for Phi-node construction. */
1088 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1089 current_ir_graph->n_loc);
1090 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized yet. */
1092 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1094 if (!has_unknown) res = optimize_node (res);
1095 current_ir_graph->current_block = res;
1097 irn_vrfy_irg (res, current_ir_graph);
1102 /* ***********************************************************************/
1103 /* Methods necessary for automatic Phi node creation */
1105 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1106 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1107 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1108 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1110 Call Graph: ( A ---> B == A "calls" B)
1112 get_value mature_block
1120 get_r_value_internal |
1124 new_rd_Phi0 new_rd_Phi_in
1126 * *************************************************************************** */
1128 /** Creates a Phi node with 0 predecessors */
/* Creates a Phi node with 0 predecessors: a placeholder used for
   values read in immature blocks; completed when the block matures. */
1129 static INLINE ir_node *
1130 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1133 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1134 irn_vrfy_irg (res, irg);
1138 /* There are two implementations of the Phi node construction. The first
1139 is faster, but does not work for blocks with more than 2 predecessors.
1140 The second works always but is slower and causes more unnecessary Phi
1142 Select the implementations by the following preprocessor flag set in
1144 #if USE_FAST_PHI_CONSTRUCTION
1146 /* This is a stack used for allocating and deallocating nodes in
1147 new_rd_Phi_in. The original implementation used the obstack
1148 to model this stack, now it is explicit. This reduces side effects.
1150 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack on the heap. */
1151 INLINE Phi_in_stack *
1152 new_Phi_in_stack(void) {
1155 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1157 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's dynamic array (and the stack itself). */
1164 free_Phi_in_stack(Phi_in_stack *s) {
1165 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the free stack for later reuse;
   grows the array only when the stack is full. */
1169 free_to_Phi_in_stack(ir_node *phi) {
1170 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1171 current_ir_graph->Phi_in_stack->pos)
1172 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1174 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1176 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node with the given predecessors: either pops and
   re-initializes a recycled node from the stack, or allocates anew. */
1179 static INLINE ir_node *
1180 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1181 int arity, ir_node **in) {
1183 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1184 int pos = current_ir_graph->Phi_in_stack->pos;
1188 /* We need to allocate a new node */
/* NOTE(review): 'db' is not among this function's visible parameters —
   presumably this should be NULL (no debug info); verify. */
1189 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1190 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1192 /* reuse the old node and initialize it again. */
1195 assert (res->kind == k_ir_node);
1196 assert (res->op == op_Phi);
1200 assert (arity >= 0);
1201 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the containing block; data preds start at 1. */
1202 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1204 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1206 (current_ir_graph->Phi_in_stack->pos)--;
1210 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1212 /* Creates a Phi node with a given, fixed array **in of predecessors.
1213 If the Phi node is unnecessary, as the same value reaches the block
1214 through all control flow paths, it is eliminated and the value
1215 returned directly. This constructor is only intended for use in
1216 the automatic Phi node generation triggered by get_value or mature.
1217 The implementation is quite tricky and depends on the fact, that
1218 the nodes are allocated on a stack:
1219 The in array contains predecessors and NULLs. The NULLs appear,
1220 if get_r_value_internal, that computed the predecessors, reached
1221 the same block on two paths. In this case the same value reaches
1222 this block on both paths, there is no definition in between. We need
1223 not allocate a Phi where these path's merge, but we have to communicate
1224 this fact to the caller. This happens by returning a pointer to the
1225 node the caller _will_ allocate. (Yes, we predict the address. We can
1226 do so because the nodes are allocated on the obstack.) The caller then
1227 finds a pointer to itself and, when this routine is called again,
/* Creates a Phi node from a fixed predecessor array, eliminating it
   when all predecessors turn out to be the same value.  NULL entries
   in <in> stand for "same value reaches on this path" (see the long
   comment above); they are replaced by the node itself. */
1230 static INLINE ir_node *
1231 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1234 ir_node *res, *known;
1236 /* Allocate a new node on the obstack. This can return a node to
1237 which some of the pointers in the in-array already point.
1238 Attention: the constructor copies the in array, i.e., the later
1239 changes to the array in this routine do not affect the
1240 constructed node! If the in array contains NULLs, there will be
1241 missing predecessors in the returned node. Is this a possible
1242 internal state of the Phi node generation? */
1243 #if USE_EXPLICIT_PHI_IN_STACK
1244 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1246 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1247 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1250 /* The in-array can contain NULLs. These were returned by
1251 get_r_value_internal if it reached the same block/definition on a
1252 second path. The NULLs are replaced by the node itself to
1253 simplify the test in the next loop. */
1254 for (i = 0; i < ins; ++i) {
1259 /* This loop checks whether the Phi has more than one predecessor.
1260 If so, it is a real Phi node and we break the loop. Else the Phi
1261 node merges the same definition on several paths and therefore is
1263 for (i = 0; i < ins; ++i)
1265 if (in[i] == res || in[i] == known) continue;
1273 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Unneeded Phi: return the node to the stack / obstack before
   returning the single known value. */
1275 #if USE_EXPLICIT_PHI_IN_STACK
1276 free_to_Phi_in_stack(res);
1278 obstack_free (current_ir_graph->obst, res);
1282 res = optimize_node (res);
1283 irn_vrfy_irg (res, irg);
1286 /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: phi_merge and get_r_value_internal recurse
   into each other. */
1291 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1294 allocates and returns this node. The routine called to allocate the
1295 node might optimize it away and return a real value, or even a pointer
1296 to a deallocated Phi node on top of the obstack!
1297 This function is called with an in-array of proper size. **/
/* Collects, for local variable <pos> of mode <mode>, the reaching
   definition from every predecessor block and merges them in a Phi. */
1299 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1301 ir_node *prevBlock, *res;
1304 /* This loop goes to all predecessor blocks of the block the Phi node is in
1305 and there finds the operands of the Phi node by calling
1306 get_r_value_internal. */
/* Note: block predecessors are 1-based (in[0] is the block itself). */
1307 for (i = 1; i <= ins; ++i) {
1308 assert (block->in[i]);
1309 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1311 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1314 /* After collecting all predecessors into the array nin a new Phi node
1315 with these predecessors is created. This constructor contains an
1316 optimization: If all predecessors of the Phi node are identical it
1317 returns the only operand instead of a new Phi node. If the value
1318 passes two different control flow edges without being defined, and
1319 this is the second path treated, a pointer to the node that will be
1320 allocated for the first path (recursion) is returned. We already
1321 know the address of this node, as it is the next node to be allocated
1322 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1323 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1325 /* Now we know the value for "pos" and can enter it in the array with
1326 all known local variables. Attention: this might be a pointer to
1327 a node, that later will be allocated!!! See new_rd_Phi_in.
1328 If this is called in mature, after some set_value in the same block,
1329 the proper value must not be overwritten:
1331 get_value (makes Phi0, put's it into graph_arr)
1332 set_value (overwrites Phi0 in graph_arr)
1333 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the result if no later set_value already replaced it. */
1336 if (!block->attr.block.graph_arr[pos]) {
1337 block->attr.block.graph_arr[pos] = res;
1339 /* printf(" value already computed by %s\n",
1340 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1346 /* This function returns the last definition of a variable. In case
1347 this variable was last defined in a previous block, Phi nodes are
1348 inserted. If the part of the firm graph containing the definition
1349 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): sampled listing -- the return type, local declarations and
   the final return/closing brace are outside the visible lines. */
1351 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1354 /* There are 4 cases to treat.
1356 1. The block is not mature and we visit it the first time. We can not
1357 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1358 predecessors is returned. This node is added to the linked list (field
1359 "link") of the containing block to be completed when this block is
1360 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1363 2. The value is already known in this block, graph_arr[pos] is set and we
1364 visit the block the first time. We can return the value without
1365 creating any new nodes.
1367 3. The block is mature and we visit it the first time. A Phi node needs
1368 to be created (phi_merge). If the Phi is not needed, as all its
1369 operands are the same value reaching the block through different
1370 paths, it's optimized away and the value itself is returned.
1372 4. The block is mature, and we visit it the second time. Now two
1373 subcases are possible:
1374 * The value was computed completely the last time we were here. This
1375 is the case if there is no loop. We can return the proper value.
1376 * The recursion that visited this node and set the flag did not
1377 return yet. We are computing a value in a loop and need to
1378 break the recursion without knowing the result yet.
1379 @@@ strange case. Straight forward we would create a Phi before
1380 starting the computation of its predecessors. In this case we will
1381 find a Phi here in any case. The problem is that this implementation
1382 only creates a Phi after computing the predecessors, so that it is
1383 hard to compute self references of this Phi. @@@
1384 There is no simple check for the second subcase. Therefore we check
1385 for a second visit and treat all such cases as the second subcase.
1386 Anyways, the basic situation is the same: we reached a block
1387 on two paths without finding a definition of the value: No Phi
1388 nodes are needed on both paths.
1389 We return this information "Two paths, no Phi needed" by a very tricky
1390 implementation that relies on the fact that an obstack is a stack and
1391 will return a node with the same address on different allocations.
1392 Look also at phi_merge and new_rd_phi_in to understand this.
1393 @@@ Unfortunately this does not work, see testprogram
1394 three_cfpred_example.
1398 /* case 4 -- already visited. */
1399 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1401 /* visited the first time */
1402 set_irn_visited(block, get_irg_visited(current_ir_graph));
1404 /* Get the local valid value */
1405 res = block->attr.block.graph_arr[pos];
1407 /* case 2 -- If the value is actually computed, return it. */
1408 if (res) return res;
1410 if (block->attr.block.matured) { /* case 3 */
1412 /* The Phi has the same amount of ins as the corresponding block. */
1413 int ins = get_irn_arity(block);
1415 NEW_ARR_A (ir_node *, nin, ins);
1417 /* Phi merge collects the predecessors and then creates a node. */
1418 res = phi_merge (block, pos, mode, nin, ins);
1420 } else { /* case 1 */
1421 /* The block is not mature, we don't know how many in's are needed. A Phi
1422 with zero predecessors is created. Such a Phi node is called Phi0
1423 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1424 to the list of Phi0 nodes in this block to be matured by mature_block
1426 The Phi0 has to remember the pos of its internal value. If the real
1427 Phi is computed, pos is used to update the array with the local
1430 res = new_rd_Phi0 (current_ir_graph, block, mode);
1431 res->attr.phi0_pos = pos;
1432 res->link = block->link;
1436 /* If we get here, the frontend missed a use-before-definition error */
1439 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1440 assert (mode->code >= irm_F && mode->code <= irm_P);
1441 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1442 tarval_mode_null[mode->code]);
1445 /* The local valid value is available now. */
1446 block->attr.block.graph_arr[pos] = res;
1454 it starts the recursion. This causes an Id at the entry of
1455 every block that has no definition of the value! **/
1457 #if USE_EXPLICIT_PHI_IN_STACK
/* With this construction algorithm no explicit Phi-in stack is needed;
   these are empty stubs kept for interface compatibility. */
1459 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1460 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Allocate a Phi with ins predecessors in block.  phi0 is the dummy Phi0
   this Phi will eventually replace (or NULL).  May return an existing node
   instead of a fresh Phi when the Phi would be superfluous.
   NOTE(review): sampled listing -- several statements of this function are
   not visible in this chunk. */
1463 static INLINE ir_node *
1464 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1465 ir_node **in, int ins, ir_node *phi0)
1468 ir_node *res, *known;
1470 /* Allocate a new node on the obstack. The allocation copies the in
1472 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1473 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1475 /* This loop checks whether the Phi has more than one predecessor.
1476 If so, it is a real Phi node and we break the loop. Else the
1477 Phi node merges the same definition on several paths and therefore
1478 is not needed. Don't consider Bad nodes! */
1480 for (i=0; i < ins; ++i)
1484 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1486 /* Optimize self referencing Phis: We can't detect them yet properly, as
1487 they still refer to the Phi0 they will replace. So replace right now. */
1488 if (phi0 && in[i] == phi0) in[i] = res;
1490 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1498 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1501 obstack_free (current_ir_graph->obst, res);
1502 if (is_Phi(known)) {
1503 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1504 order, an enclosing Phi node may get superfluous. */
1505 res = optimize_in_place_2(known);
1506 if (res != known) { exchange(known, res); }
1511 /* A undefined value, e.g., in unreachable code. */
1515 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1516 irn_vrfy_irg (res, irg);
1517 /* Memory Phis in endless loops must be kept alive.
1518 As we can't distinguish these easily we keep all of them alive. */
1519 if ((res->op == op_Phi) && (mode == mode_M))
1520 add_End_keepalive(irg->end, res);
1527 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1529 #if PRECISE_EXC_CONTEXT
1531 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1533 /* Construct a new frag_array for node n.
1534 Copy the content from the current graph_arr of the corresponding block:
1535 this is the current state.
1536 Set ProjM(n) as current memory state.
1537 Further the last entry in frag_arr of current block points to n. This
1538 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): sampled listing -- local declarations, the else branch
   header and the closing lines of this function are not visible here. */
1540 static INLINE ir_node ** new_frag_arr (ir_node *n)
1545 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1546 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1547 sizeof(ir_node *)*current_ir_graph->n_loc);
1549 /* turn off optimization before allocating Proj nodes, as res isn't
1551 opt = get_opt_optimize(); set_optimize(0);
1552 /* Here we rely on the fact that all frag ops have Memory as first result! */
1553 if (get_irn_op(n) == op_Call)
1554 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1556 assert((pn_Quot_M == pn_DivMod_M) &&
1557 (pn_Quot_M == pn_Div_M) &&
1558 (pn_Quot_M == pn_Mod_M) &&
1559 (pn_Quot_M == pn_Load_M) &&
1560 (pn_Quot_M == pn_Store_M) &&
1561 (pn_Quot_M == pn_Alloc_M) );
1562 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the last fragile op of the current block (head of chain). */
1566 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the frag_arr stored in node n's attributes; its location depends
   on the op -- Call and Alloc keep it in op-specific attribute structs. */
1570 static INLINE ir_node **
1571 get_frag_arr (ir_node *n) {
1572 if (get_irn_op(n) == op_Call) {
1573 return n->attr.call.frag_arr;
1574 } else if (get_irn_op(n) == op_Alloc) {
1575 return n->attr.a.frag_arr;
1577 return n->attr.frag_arr;
/* Record val at position pos in frag_arr unless already set, and propagate
   it along the chain of frag arrays of later fragile ops in the block.
   NOTE(review): sampled listing -- two variants (recursive and iterative)
   are visible; the preprocessor lines selecting between them, plus the
   return type and closing braces, are not visible in this chunk. */
1582 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1584 if (!frag_arr[pos]) frag_arr[pos] = val;
1585 if (frag_arr[current_ir_graph->n_loc - 1]) {
1586 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1587 assert(arr != frag_arr && "Endless recursion detected");
1588 set_frag_value(arr, pos, val);
/* Iterative variant: bounded walk along the frag chain (at most 1000 hops). */
1593 for (i = 0; i < 1000; ++i) {
1594 if (!frag_arr[pos]) {
1595 frag_arr[pos] = val;
1597 if (frag_arr[current_ir_graph->n_loc - 1]) {
1598 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1604 assert(0 && "potential endless recursion");
/* Return the definition of value pos valid after the fragile op cfOp in
   block; builds a Phi when a set_value happened after cfOp.
   NOTE(review): sampled listing -- return type, some declarations and the
   closing lines of this function are not visible in this chunk. */
1609 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1613 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1615 frag_arr = get_frag_arr(cfOp);
1616 res = frag_arr[pos];
1618 if (block->attr.block.graph_arr[pos]) {
1619 /* There was a set_value after the cfOp and no get_value before that
1620 set_value. We must build a Phi node now. */
1621 if (block->attr.block.matured) {
1622 int ins = get_irn_arity(block);
1624 NEW_ARR_A (ir_node *, nin, ins);
1625 res = phi_merge(block, pos, mode, nin, ins);
1627 res = new_rd_Phi0 (current_ir_graph, block, mode);
1628 res->attr.phi0_pos = pos;
1629 res->link = block->link;
1633 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1634 but this should be better: (remove comment if this works) */
1635 /* It's a Phi, we can write this into all graph_arrs with NULL */
1636 set_frag_value(block->attr.block.graph_arr, pos, res);
1638 res = get_r_value_internal(block, pos, mode);
1639 set_frag_value(block->attr.block.graph_arr, pos, res);
1647 computes the predecessors for the real phi node, and then
1648 allocates and returns this node. The routine called to allocate the
1649 node might optimize it away and return a real value.
1650 This function must be called with an in-array of proper size. **/
/* NOTE(review): sampled listing -- the return-type line, several statements
   and the closing brace of this function are not visible in this chunk. */
1652 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1654 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1657 /* If this block has no value at pos create a Phi0 and remember it
1658 in graph_arr to break recursions.
1659 Else we may not set graph_arr as there a later value is remembered. */
1661 if (!block->attr.block.graph_arr[pos]) {
1662 if (block == get_irg_start_block(current_ir_graph)) {
1663 /* Collapsing to Bad tarvals is no good idea.
1664 So we call a user-supplied routine here that deals with this case as
1665 appropriate for the given language. Unfortunately the only help we can give
1666 here is the position.
1668 Even if all variables are defined before use, it can happen that
1669 we get to the start block, if a cond has been replaced by a tuple
1670 (bad, jmp). In this case we call the function needlessly, eventually
1671 generating a nonexistent error.
1672 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1675 if (default_initialize_local_variable)
1676 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1678 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1679 /* We don't need to care about exception ops in the start block.
1680 There are none by definition. */
1681 return block->attr.block.graph_arr[pos];
1683 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1684 block->attr.block.graph_arr[pos] = phi0;
1685 #if PRECISE_EXC_CONTEXT
1686 if (get_opt_precise_exc_context()) {
1687 /* Set graph_arr for fragile ops. Also here we should break recursion.
1688 We could choose a cyclic path through a cfop. But the recursion would
1689 break at some point. */
1690 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1696 /* This loop goes to all predecessor blocks of the block the Phi node
1697 is in and there finds the operands of the Phi node by calling
1698 get_r_value_internal. */
1699 for (i = 1; i <= ins; ++i) {
1700 prevCfOp = skip_Proj(block->in[i]);
1702 if (is_Bad(prevCfOp)) {
1703 /* In case a Cond has been optimized we would get right to the start block
1704 with an invalid definition. */
1705 nin[i-1] = new_Bad();
1708 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1710 if (!is_Bad(prevBlock)) {
1711 #if PRECISE_EXC_CONTEXT
1712 if (get_opt_precise_exc_context() &&
1713 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1714 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1715 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1718 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1720 nin[i-1] = new_Bad();
1724 /* We want to pass the Phi0 node to the constructor: this finds additional
1725 optimization possibilities.
1726 The Phi0 node either is allocated in this function, or it comes from
1727 a former call to get_r_value_internal. In this case we may not yet
1728 exchange phi0, as this is done in mature_block. */
1730 phi0_all = block->attr.block.graph_arr[pos];
1731 if (!((get_irn_op(phi0_all) == op_Phi) &&
1732 (get_irn_arity(phi0_all) == 0) &&
1733 (get_nodes_block(phi0_all) == block)))
1739 /* After collecting all predecessors into the array nin a new Phi node
1740 with these predecessors is created. This constructor contains an
1741 optimization: If all predecessors of the Phi node are identical it
1742 returns the only operand instead of a new Phi node. */
1743 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1745 /* In case we allocated a Phi0 node at the beginning of this procedure,
1746 we need to exchange this Phi0 with the real Phi. */
1748 exchange(phi0, res);
1749 block->attr.block.graph_arr[pos] = res;
1750 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1751 only an optimization. */
1757 /* This function returns the last definition of a variable. In case
1758 this variable was last defined in a previous block, Phi nodes are
1759 inserted. If the part of the firm graph containing the definition
1760 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): sampled listing -- the return type, local declarations and
   the final return/closing brace are outside the visible lines. */
1762 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1765 /* There are 4 cases to treat.
1767 1. The block is not mature and we visit it the first time. We can not
1768 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1769 predecessors is returned. This node is added to the linked list (field
1770 "link") of the containing block to be completed when this block is
1771 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1774 2. The value is already known in this block, graph_arr[pos] is set and we
1775 visit the block the first time. We can return the value without
1776 creating any new nodes.
1778 3. The block is mature and we visit it the first time. A Phi node needs
1779 to be created (phi_merge). If the Phi is not needed, as all its
1780 operands are the same value reaching the block through different
1781 paths, it's optimized away and the value itself is returned.
1783 4. The block is mature, and we visit it the second time. Now two
1784 subcases are possible:
1785 * The value was computed completely the last time we were here. This
1786 is the case if there is no loop. We can return the proper value.
1787 * The recursion that visited this node and set the flag did not
1788 return yet. We are computing a value in a loop and need to
1789 break the recursion. This case only happens if we visited
1790 the same block with phi_merge before, which inserted a Phi0.
1791 So we return the Phi0.
1794 /* case 4 -- already visited. */
1795 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1796 /* As phi_merge allocates a Phi0 this value is always defined. Here
1797 is the critical difference of the two algorithms. */
1798 assert(block->attr.block.graph_arr[pos]);
1799 return block->attr.block.graph_arr[pos];
1802 /* visited the first time */
1803 set_irn_visited(block, get_irg_visited(current_ir_graph));
1805 /* Get the local valid value */
1806 res = block->attr.block.graph_arr[pos];
1808 /* case 2 -- If the value is actually computed, return it. */
1809 if (res) { return res; };
1811 if (block->attr.block.matured) { /* case 3 */
1813 /* The Phi has the same amount of ins as the corresponding block. */
1814 int ins = get_irn_arity(block);
1816 NEW_ARR_A (ir_node *, nin, ins);
1818 /* Phi merge collects the predecessors and then creates a node. */
1819 res = phi_merge (block, pos, mode, nin, ins);
1821 } else { /* case 1 */
1822 /* The block is not mature, we don't know how many in's are needed. A Phi
1823 with zero predecessors is created. Such a Phi node is called Phi0
1824 node. The Phi0 is then added to the list of Phi0 nodes in this block
1825 to be matured by mature_block later.
1826 The Phi0 has to remember the pos of its internal value. If the real
1827 Phi is computed, pos is used to update the array with the local
1829 res = new_rd_Phi0 (current_ir_graph, block, mode);
1830 res->attr.phi0_pos = pos;
1831 res->link = block->link;
1835 /* If we get here, the frontend missed a use-before-definition error */
1838 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1839 assert (mode->code >= irm_F && mode->code <= irm_P);
1840 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1841 get_mode_null(mode));
1844 /* The local valid value is available now. */
1845 block->attr.block.graph_arr[pos] = res;
1850 #endif /* USE_FAST_PHI_CONSTRUCTION */
1852 /* ************************************************************************** */
1854 /** Finalize a Block node, when all control flows are known. */
1855 /** Acceptable parameters are only Block nodes. */
/* NOTE(review): sampled listing -- the return type, local declarations
   (ins, nin, n, next) and some loop lines are not visible in this chunk. */
1857 mature_block (ir_node *block)
1864 assert (get_irn_opcode(block) == iro_Block);
1865 /* @@@ should be commented in
1866 assert (!get_Block_matured(block) && "Block already matured"); */
1868 if (!get_Block_matured(block)) {
1869 ins = ARR_LEN (block->in)-1;
1870 /* Fix block parameters */
1871 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1873 /* An array for building the Phi nodes. */
1874 NEW_ARR_A (ir_node *, nin, ins);
1876 /* Traverse a chain of Phi nodes attached to this block and mature
1878 for (n = block->link; n; n=next) {
1879 inc_irg_visited(current_ir_graph);
1881 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1884 block->attr.block.matured = 1;
1886 /* Now, as the block is a finished firm node, we can optimize it.
1887 Since other nodes have been allocated since the block was created
1888 we can not free the node on the obstack. Therefore we have to call
1890 Unfortunately the optimization does not change a lot, as all allocated
1891 nodes refer to the unoptimized node.
1892 We can call _2, as global cse has no effect on blocks. */
1893 block = optimize_in_place_2(block);
1894 irn_vrfy_irg(block, current_ir_graph);
/* Debug-info variants of the node constructors: each new_d_X forwards to
   new_rd_X using current_ir_graph and its current (or start) block.
   NOTE(review): sampled listing -- return types, argument continuation
   lines and closing braces of these one-line wrappers are not visible. */
1899 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1901 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Constants are always placed in the start block. */
1906 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1908 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1913 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1915 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1921 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1923 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1928 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1930 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond: marks the Cond fragmentary and records max_proj. */
1935 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1938 assert(arg->op == op_Cond);
1939 arg->attr.c.kind = fragmentary;
1940 arg->attr.c.default_proj = max_proj;
1941 res = new_Proj (arg, mode_X, max_proj);
1946 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1948 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1953 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1955 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1959 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1961 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1966 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1968 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1973 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1975 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1981 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1983 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1988 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1990 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1995 * allocate the frag array
/* For precise exception context during construction: attach a frag array to
   res, unless res was optimized into another op or already has one (CSE). */
1997 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
1998 if (get_opt_precise_exc_context()) {
1999 if ((current_ir_graph->phase_state == phase_building) &&
2000 (get_irn_op(res) == op) && /* Could be optimized away. */
2001 !*frag_store) /* Could be a cse where the arr is already set. */ {
2002 *frag_store = new_frag_arr(res);
/* Wrappers for arithmetic and control ops; fragile ops (Quot/DivMod/Div/
   Mod/Call) additionally allocate a frag array for precise exception
   context.  NOTE(review): sampled listing -- return types, argument
   continuation lines, #endif lines and closing braces are not visible. */
2009 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2012 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2014 #if PRECISE_EXC_CONTEXT
2015 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2022 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2025 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2027 #if PRECISE_EXC_CONTEXT
2028 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2035 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2038 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2040 #if PRECISE_EXC_CONTEXT
2041 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2048 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2051 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2053 #if PRECISE_EXC_CONTEXT
2054 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
2061 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2063 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2068 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2070 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2075 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2077 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2082 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2084 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2089 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2091 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2096 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2098 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2103 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2105 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2110 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2112 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2117 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2119 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2124 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2126 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2131 new_d_Jmp (dbg_info* db)
2133 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2137 new_d_Cond (dbg_info* db, ir_node *c)
2139 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2143 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2147 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2148 store, callee, arity, in, tp);
2149 #if PRECISE_EXC_CONTEXT
2150 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
/* More debug-info wrappers; the memory ops Load/Store/Alloc also allocate a
   frag array for precise exception context.
   NOTE(review): sampled listing -- several lines of these wrappers are not
   visible in this chunk. */
2157 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2159 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2164 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2166 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2171 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2174 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2176 #if PRECISE_EXC_CONTEXT
2177 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2184 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2187 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2189 #if PRECISE_EXC_CONTEXT
2190 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2197 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2201 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2202 store, size, alloc_type, where);
2203 #if PRECISE_EXC_CONTEXT
2204 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2211 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2213 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2214 store, ptr, size, free_type);
2218 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2219 /* GL: objptr was called frame before. Frame was a bad choice for the name
2220 as the operand could as well be a pointer to a dynamic object. */
2222 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2223 store, objptr, 0, NULL, ent);
2227 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2229 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2230 store, objptr, n_index, index, sel);
2234 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2236 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2237 store, objptr, ent));
/* SymConsts are placed in the start block, like Consts. */
2241 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2243 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2248 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2250 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2258 return current_ir_graph->bad;
2262 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2264 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2269 new_d_Unknown (ir_mode *m)
2271 return new_rd_Unknown(current_ir_graph, m);
2275 new_d_CallBegin (dbg_info *db, ir_node *call)
2278 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2283 new_d_EndReg (dbg_info *db)
2286 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2291 new_d_EndExcept (dbg_info *db)
2294 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2299 new_d_Break (dbg_info *db)
2301 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2305 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2307 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2312 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2316 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2317 callee, arity, in, tp);
2322 /* ********************************************************************* */
2323 /* Comfortable interface with automatic Phi node construction. */
2324 /* (Uses also constructors of the unnumbered interface, except new_Block.) */
2325 /* ********************************************************************* */
2327 /* * Block construction **/
2328 /* immature Block without predecessors */
/* Create a new immature (not yet matured) block, make it the current block,
   and set up the graph_arr used by the automatic Phi construction.
   NOTE(review): sampled listing -- the local declaration of res and the
   trailing return/closing brace are not visible here. */
2329 ir_node *new_d_immBlock (dbg_info* db) {
2332 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2333 /* creates a new dynamic in-array as length of in is -1 */
2334 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2335 current_ir_graph->current_block = res;
2336 res->attr.block.matured = 0;
2337 /* res->attr.block.exc = exc_normal; */
2338 /* res->attr.block.handler_entry = 0; */
2339 res->attr.block.irg = current_ir_graph;
2340 res->attr.block.backedge = NULL;
2341 res->attr.block.in_cg = NULL;
2342 res->attr.block.cg_backedge = NULL;
2343 set_Block_block_visited(res, 0);
2345 /* Create and initialize array for Phi-node construction. */
2346 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2347 current_ir_graph->n_loc);
2348 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2350 /* Immature block may not be optimized! */
2351 irn_vrfy_irg (res, current_ir_graph);
2358 return new_d_immBlock(NULL);
2361 /* add an edge to a jmp/control flow node */
2363 add_in_edge (ir_node *block, ir_node *jmp)
2365 if (block->attr.block.matured) {
2366 assert(0 && "Error: Block already matured!\n");
2369 assert (jmp != NULL);
2370 ARR_APP1 (ir_node *, block->in, jmp);
2374 /* changing the current block */
2376 switch_block (ir_node *target)
2378 current_ir_graph->current_block = target;
2381 /* ************************ */
2382 /* parameter administration */
2384 /* get a value from the parameter array from the current block by its index */
2386 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2388 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2389 inc_irg_visited(current_ir_graph);
/* pos is shifted by 1: slot 0 of graph_arr holds the memory state. */
2391 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2393 /* get a value from the parameter array from the current block by its index */
2395 get_value (int pos, ir_mode *mode)
2397 return get_d_value(NULL, pos, mode);
2400 /* set a value at position pos in the parameter array from the current block */
2402 set_value (int pos, ir_node *value)
2404 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2405 assert(pos+1 < current_ir_graph->n_loc);
2406 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2409 /* get the current store */
2413 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2414 /* GL: one could call get_value instead */
2415 inc_irg_visited(current_ir_graph);
/* The memory state lives at index 0 of graph_arr, with mode_M. */
2416 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2419 /* set the current store */
2421 set_store (ir_node *store)
2423 /* GL: one could call set_value instead */
2424 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2425 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Mark ka as a keep-alive edge of the End node so it survives dead node
   elimination. */
2429 keep_alive (ir_node *ka)
2431 add_End_keepalive(current_ir_graph->end, ka);
2434 /** Useful access routines **/
2435 /* Returns the current block of the current graph. To set the current
2436 block use switch_block(). */
2437 ir_node *get_cur_block() {
2438 return get_irg_current_block(current_ir_graph);
2441 /* Returns the frame type of the current graph */
2442 type *get_cur_frame_type() {
2443 return get_irg_frame_type(current_ir_graph);
2447 /* ********************************************************************* */
2450 /* call once for each run of the library */
2452 init_cons (default_initialize_local_variable_func_t *func)
2454 default_initialize_local_variable = func;
2457 /* call for each graph */
2459 finalize_cons (ir_graph *irg) {
2460 irg->phase_state = phase_high;
2464 ir_node *new_Block(int arity, ir_node **in) {
2465 return new_d_Block(NULL, arity, in);
2467 ir_node *new_Start (void) {
2468 return new_d_Start(NULL);
2470 ir_node *new_End (void) {
2471 return new_d_End(NULL);
2473 ir_node *new_Jmp (void) {
2474 return new_d_Jmp(NULL);
2476 ir_node *new_Cond (ir_node *c) {
2477 return new_d_Cond(NULL, c);
2479 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2480 return new_d_Return(NULL, store, arity, in);
2482 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2483 return new_d_Raise(NULL, store, obj);
2485 ir_node *new_Const (ir_mode *mode, tarval *con) {
2486 return new_d_Const(NULL, mode, con);
2488 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2489 return new_d_SymConst(NULL, value, kind);
2491 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2492 return new_d_simpleSel(NULL, store, objptr, ent);
2494 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2496 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2498 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2499 return new_d_InstOf (NULL, store, objptr, ent);
2501 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2503 return new_d_Call(NULL, store, callee, arity, in, tp);
2505 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2506 return new_d_Add(NULL, op1, op2, mode);
2508 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2509 return new_d_Sub(NULL, op1, op2, mode);
2511 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2512 return new_d_Minus(NULL, op, mode);
2514 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2515 return new_d_Mul(NULL, op1, op2, mode);
2517 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2518 return new_d_Quot(NULL, memop, op1, op2);
2520 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2521 return new_d_DivMod(NULL, memop, op1, op2);
2523 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2524 return new_d_Div(NULL, memop, op1, op2);
2526 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2527 return new_d_Mod(NULL, memop, op1, op2);
2529 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2530 return new_d_Abs(NULL, op, mode);
2532 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2533 return new_d_And(NULL, op1, op2, mode);
2535 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2536 return new_d_Or(NULL, op1, op2, mode);
2538 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2539 return new_d_Eor(NULL, op1, op2, mode);
2541 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2542 return new_d_Not(NULL, op, mode);
2544 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2545 return new_d_Shl(NULL, op, k, mode);
2547 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2548 return new_d_Shr(NULL, op, k, mode);
2550 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2551 return new_d_Shrs(NULL, op, k, mode);
2553 #define new_Rotate new_Rot
2554 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2555 return new_d_Rot(NULL, op, k, mode);
2557 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2558 return new_d_Cmp(NULL, op1, op2);
2560 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2561 return new_d_Conv(NULL, op, mode);
2563 ir_node *new_Cast (ir_node *op, type *to_tp) {
2564 return new_d_Cast(NULL, op, to_tp);
2566 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2567 return new_d_Phi(NULL, arity, in, mode);
2569 ir_node *new_Load (ir_node *store, ir_node *addr) {
2570 return new_d_Load(NULL, store, addr);
2572 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2573 return new_d_Store(NULL, store, addr, val);
2575 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2576 where_alloc where) {
2577 return new_d_Alloc(NULL, store, size, alloc_type, where);
2579 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2581 return new_d_Free(NULL, store, ptr, size, free_type);
2583 ir_node *new_Sync (int arity, ir_node **in) {
2584 return new_d_Sync(NULL, arity, in);
2586 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2587 return new_d_Proj(NULL, arg, mode, proj);
2589 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2590 return new_d_defaultProj(NULL, arg, max_proj);
2592 ir_node *new_Tuple (int arity, ir_node **in) {
2593 return new_d_Tuple(NULL, arity, in);
2595 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2596 return new_d_Id(NULL, val, mode);
2598 ir_node *new_Bad (void) {
2601 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2602 return new_d_Confirm (NULL, val, bound, cmp);
2604 ir_node *new_Unknown(ir_mode *m) {
2605 return new_d_Unknown(m);
2607 ir_node *new_CallBegin (ir_node *callee) {
2608 return new_d_CallBegin(NULL, callee);
2610 ir_node *new_EndReg (void) {
2611 return new_d_EndReg(NULL);
2613 ir_node *new_EndExcept (void) {
2614 return new_d_EndExcept(NULL);
2616 ir_node *new_Break (void) {
2617 return new_d_Break(NULL);
2619 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2620 return new_d_Filter(NULL, arg, mode, proj);
2622 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2623 return new_d_FuncCall(NULL, callee, arity, in, tp);