3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A raw ("_r") block is born mature and unvisited. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 /* res->attr.block.exc = exc_normal; */
64 /* res->attr.block.handler_entry = 0; */
65 res->attr.block.irg = irg;
/* Backedge bookkeeping for loop handling; the interprocedural (cg)
   fields stay NULL until interprocedural view construction sets them. */
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
/* Start node: a mode_T tuple with no predecessors. */
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 /* res->attr.start.irg = irg; */
82 irn_vrfy_irg (res, irg);
/* End node: arity -1 requests a dynamic in-array so keep-alive edges
   can be appended later (see add_End_keepalive).  Not optimized. */
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its arity must agree with the Phi's. */
108 if ( get_Block_matured(block) )
109 assert( get_irn_arity(block) == arity );
111 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
113 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Phis with Unknown predecessors are not optimized yet: the Unknowns
   may still be replaced by real values later. */
115 for (i = arity-1; i >= 0; i--)
116 if (get_irn_op(in[i]) == op_Unknown) {
121 if (!has_unknown) res = optimize_node (res);
122 irn_vrfy_irg (res, irg);
124 /* Memory Phis in endless loops must be kept alive.
125 As we can't distinguish these easily we keep all of them alive. */
126 if ((res->op == op_Phi) && (mode == mode_M))
127 add_End_keepalive(irg->end, res);
132 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
/* NOTE(review): the 'block' parameter is ignored -- the Const is always
   placed in the graph's start block.  Presumably intentional (constants
   are block-independent in firm); confirm before relying on 'block'. */
135 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
136 res->attr.con.tv = con;
137 set_Const_type(res, tp); /* Call method because of complex assertion. */
138 res = optimize_node (res);
139 assert(get_Const_type(res) == tp);
140 irn_vrfy_irg (res, irg);
/* Convenience wrapper: derives the type from the tarval.  Entity
   constants get a pointer type to the entity's type; everything else
   gets unknown_type. */
146 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
148 type *tp = unknown_type;
149 /* removing this somehow causes errors in jack. */
150 if (tarval_is_entity(con))
151 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
153 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Id node: forwards 'val' unchanged; optimize_node will usually fold
   it away immediately. */
157 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
162 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
163 res = optimize_node (res);
164 irn_vrfy_irg (res, irg);
/* Proj node: selects result number 'proj' from a mode_T predecessor. */
169 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
175 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
176 res->attr.proj = proj;
/* A Proj must have a predecessor that lives in a valid block. */
179 assert(get_Proj_pred(res));
180 assert(get_nodes_Block(get_Proj_pred(res)));
182 res = optimize_node (res);
184 irn_vrfy_irg (res, irg);
/* Default Proj for a Cond: marks the Cond as 'fragmentary' and records
   the default projection number, then builds the mode_X Proj for it. */
190 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
194 assert(arg->op == op_Cond);
195 arg->attr.c.kind = fragmentary;
196 arg->attr.c.default_proj = max_proj;
197 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: converts 'op' to the given mode. */
202 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
207 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
208 res = optimize_node (res);
209 irn_vrfy_irg (res, irg);
/* Cast: changes the type (not the mode) of 'op' to 'to_tp'. */
214 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
217 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
218 res->attr.cast.totype = to_tp;
219 res = optimize_node (res);
220 irn_vrfy_irg (res, irg);
/* Tuple: bundles 'arity' values into one mode_T node. */
225 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
229 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
230 res = optimize_node (res);
231 irn_vrfy_irg (res, irg);
/* ---- Arithmetic, logic, shift and compare constructors ----------------
   All follow the same pattern: build the node from its operands, run
   local optimization (optimize_node), then verify (irn_vrfy_irg).
   Quot/DivMod/Div/Mod additionally take an explicit memory operand
   ('memop') and yield a mode_T tuple; Cmp also yields a mode_T tuple
   (one Proj per relation). */
236 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
237 ir_node *op1, ir_node *op2, ir_mode *mode)
243 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
244 res = optimize_node (res);
245 irn_vrfy_irg (res, irg);
250 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
251 ir_node *op1, ir_node *op2, ir_mode *mode)
257 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
258 res = optimize_node (res);
259 irn_vrfy_irg (res, irg);
264 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
265 ir_node *op, ir_mode *mode)
270 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
271 res = optimize_node (res);
272 irn_vrfy_irg (res, irg);
277 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
278 ir_node *op1, ir_node *op2, ir_mode *mode)
284 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
285 res = optimize_node (res);
286 irn_vrfy_irg (res, irg);
291 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
292 ir_node *memop, ir_node *op1, ir_node *op2)
299 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
300 res = optimize_node (res);
301 irn_vrfy_irg (res, irg);
306 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
307 ir_node *memop, ir_node *op1, ir_node *op2)
314 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
315 res = optimize_node (res);
316 irn_vrfy_irg (res, irg);
321 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
322 ir_node *memop, ir_node *op1, ir_node *op2)
329 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
330 res = optimize_node (res);
331 irn_vrfy_irg (res, irg);
336 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
337 ir_node *memop, ir_node *op1, ir_node *op2)
344 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
345 res = optimize_node (res);
346 irn_vrfy_irg (res, irg);
351 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
352 ir_node *op1, ir_node *op2, ir_mode *mode)
358 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
359 res = optimize_node (res);
360 irn_vrfy_irg (res, irg);
365 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
366 ir_node *op1, ir_node *op2, ir_mode *mode)
372 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
373 res = optimize_node (res);
374 irn_vrfy_irg (res, irg);
379 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
387 res = optimize_node (res);
388 irn_vrfy_irg (res, irg);
393 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op, ir_mode *mode)
399 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
400 res = optimize_node (res);
401 irn_vrfy_irg (res, irg);
406 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
407 ir_node *op, ir_node *k, ir_mode *mode)
413 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
414 res = optimize_node (res);
415 irn_vrfy_irg (res, irg);
420 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
421 ir_node *op, ir_node *k, ir_mode *mode)
427 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
428 res = optimize_node (res);
429 irn_vrfy_irg (res, irg);
434 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
441 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
442 res = optimize_node (res);
443 irn_vrfy_irg (res, irg);
448 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
449 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
456 res = optimize_node (res);
457 irn_vrfy_irg (res, irg);
462 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_mode *mode)
468 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
469 res = optimize_node (res);
470 irn_vrfy_irg (res, irg);
475 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
476 ir_node *op1, ir_node *op2)
482 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
483 res = optimize_node (res);
484 irn_vrfy_irg (res, irg);
/* Jmp: unconditional control flow (mode_X), no data predecessors. */
489 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
492 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
493 res = optimize_node (res);
494 irn_vrfy_irg (res, irg);
/* Cond: conditional branch/switch on 'c'.  Starts out 'dense' with
   default projection 0; new_rd_defaultProj may later switch it to
   'fragmentary'. */
499 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
504 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
505 res->attr.c.kind = dense;
506 res->attr.c.default_proj = 0;
507 res = optimize_node (res);
508 irn_vrfy_irg (res, irg);
/* Call: the real in-array r_in prepends the memory ('store') and the
   callee before the 'arity' actual arguments (arguments are copied to
   r_in[2..]). */
513 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
514 ir_node *callee, int arity, ir_node **in, type *tp)
521 NEW_ARR_A (ir_node *, r_in, r_arity);
524 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
526 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type; callee_arr is filled in later
   by callee analysis. */
528 assert(is_method_type(tp));
529 set_Call_type(res, tp);
530 res->attr.call.callee_arr = NULL;
531 res = optimize_node (res);
532 irn_vrfy_irg (res, irg);
/* Return: memory first, then the 'arity' result values (copied to
   r_in[1..]). */
537 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
538 ir_node *store, int arity, ir_node **in)
545 NEW_ARR_A (ir_node *, r_in, r_arity);
547 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
548 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
549 res = optimize_node (res);
550 irn_vrfy_irg (res, irg);
/* Raise: throws 'obj'; takes memory and the exception object. */
555 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
561 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
562 res = optimize_node (res);
563 irn_vrfy_irg (res, irg);
/* Load: mode_T tuple from memory and address. */
568 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
569 ir_node *store, ir_node *adr)
575 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
577 res = optimize_node (res);
578 irn_vrfy_irg (res, irg);
/* Store: memory, address and the value to store. */
583 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
584 ir_node *store, ir_node *adr, ir_node *val)
591 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
593 res = optimize_node (res);
595 irn_vrfy_irg (res, irg);
/* Alloc: allocates 'size' bytes of 'alloc_type'; 'where' selects
   stack or heap allocation. */
600 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
601 ir_node *size, type *alloc_type, where_alloc where)
607 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
609 res->attr.a.where = where;
610 res->attr.a.type = alloc_type;
612 res = optimize_node (res);
613 irn_vrfy_irg (res, irg);
/* Free: releases 'size' bytes at 'ptr' of type 'free_type'. */
618 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *ptr, ir_node *size, type *free_type)
626 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
628 res->attr.f = free_type;
630 res = optimize_node (res);
631 irn_vrfy_irg (res, irg);
/* Sel: selects entity 'ent' from the compound/array at 'objptr'.
   r_in prepends memory and objptr before the 'arity' index operands
   (copied to r_in[2..]).  Result is a machine pointer (mode_P_mach). */
636 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
637 int arity, ir_node **in, entity *ent)
643 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
646 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
649 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
650 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
652 res->attr.s.ent = ent;
654 res = optimize_node (res);
655 irn_vrfy_irg (res, irg);
660 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
661 ir_node *objptr, type *ent)
668 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this constructor builds the node with op_Sel, not an
   InstOf opcode -- looks like a copy/paste leftover from new_rd_Sel
   (while the attribute written below is attr.io).  Verify whether an
   op_InstOf exists at this revision and should be used here. */
672 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
674 res->attr.io.ent = ent;
/* Optimization/verification deliberately disabled for this node. */
676 /* res = optimize (res);
677 * irn_vrfy_irg (res, irg); */
/* SymConst: symbolic constant (address, size, ...).  The mode depends
   on the kind: address kinds yield a pointer mode. */
682 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
683 symconst_kind symkind, type *tp)
687 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
691 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
693 res->attr.i.num = symkind;
694 res->attr.i.sym = value;
697 res = optimize_node (res);
698 irn_vrfy_irg (res, irg);
/* Convenience wrapper with unknown_type. */
703 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
704 symconst_kind symkind)
706 ir_node *res = new_rd_SymConst_type (db, irg, block, value, symkind, unknown_type);
/* Sync: joins several memory values into one (mode_M). */
711 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
715 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
717 res = optimize_node (res);
718 irn_vrfy_irg (res, irg);
/* Bad: returns the graph's Bad node (body not visible in this view). */
723 new_rd_Bad (ir_graph *irg)
/* Confirm: asserts that 'val' relates to 'bound' by comparison 'cmp';
   carries val's mode. */
729 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
731 ir_node *in[2], *res;
735 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
737 res->attr.confirm_cmp = cmp;
739 res = optimize_node (res);
740 irn_vrfy_irg(res, irg);
/* Unknown: placeholder for a not-yet-known value; always placed in the
   start block and never optimized. */
745 new_rd_Unknown (ir_graph *irg, ir_mode *m)
747 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* ---- Interprocedural-view constructors --------------------------------- */
/* CallBegin: marks the begin of an interprocedurally inlined call;
   its single predecessor is the call's function pointer. */
751 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
755 in[0] = get_Call_ptr(call);
756 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
757 /* res->attr.callbegin.irg = irg; */
758 res->attr.callbegin.call = call;
759 res = optimize_node (res);
760 irn_vrfy_irg (res, irg);
/* EndReg: regular-return collector; arity -1 gives a dynamic in-array
   like End, so it is not optimized. */
765 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
769 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
772 irn_vrfy_irg (res, irg);
/* EndExcept: exceptional-return collector; registers itself in the
   graph. */
777 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
781 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
782 irg->end_except = res;
784 irn_vrfy_irg (res, irg);
/* Break: control flow edge in the interprocedural view. */
789 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
792 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
793 res = optimize_node (res);
794 irn_vrfy_irg (res, irg);
/* Filter: interprocedural analogue of Proj; cg fields are filled in
   later by interprocedural view construction. */
799 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
805 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
806 res->attr.filter.proj = proj;
807 res->attr.filter.in_cg = NULL;
808 res->attr.filter.backedge = NULL;
811 assert(get_Proj_pred(res));
812 assert(get_nodes_Block(get_Proj_pred(res)));
814 res = optimize_node (res);
816 irn_vrfy_irg (res, irg);
/* FuncCall: like Call but without a memory operand -- the real
   in-array holds only the callee followed by the arguments (copied to
   r_in[1..]). */
822 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
823 ir_node *callee, int arity, ir_node **in, type *tp)
830 NEW_ARR_A (ir_node *, r_in, r_arity);
832 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
834 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
836 assert(is_method_type(tp));
837 set_FuncCall_type(res, tp);
838 res->attr.call.callee_arr = NULL;
839 res = optimize_node (res);
840 irn_vrfy_irg (res, irg);
/* ---- new_r_* wrappers --------------------------------------------------
   Convenience constructors without debug information: each simply
   forwards to the corresponding new_rd_* constructor with db == NULL. */
845 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
846 return new_rd_Block(NULL, irg, arity, in);
848 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
849 return new_rd_Start(NULL, irg, block);
851 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
852 return new_rd_End(NULL, irg, block);
854 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
855 return new_rd_Jmp(NULL, irg, block);
857 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
858 return new_rd_Cond(NULL, irg, block, c);
860 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
861 ir_node *store, int arity, ir_node **in) {
862 return new_rd_Return(NULL, irg, block, store, arity, in);
864 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
865 ir_node *store, ir_node *obj) {
866 return new_rd_Raise(NULL, irg, block, store, obj);
868 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
869 ir_mode *mode, tarval *con) {
870 return new_rd_Const(NULL, irg, block, mode, con);
872 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
873 symconst_symbol value, symconst_kind symkind) {
874 return new_rd_SymConst(NULL, irg, block, value, symkind);
876 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
877 ir_node *objptr, int n_index, ir_node **index,
879 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
881 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
883 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
885 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
886 ir_node *callee, int arity, ir_node **in,
888 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
890 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
891 ir_node *op1, ir_node *op2, ir_mode *mode) {
892 return new_rd_Add(NULL, irg, block, op1, op2, mode);
894 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
895 ir_node *op1, ir_node *op2, ir_mode *mode) {
896 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
898 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
899 ir_node *op, ir_mode *mode) {
900 return new_rd_Minus(NULL, irg, block, op, mode);
902 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
903 ir_node *op1, ir_node *op2, ir_mode *mode) {
904 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
906 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
907 ir_node *memop, ir_node *op1, ir_node *op2) {
908 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
910 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
911 ir_node *memop, ir_node *op1, ir_node *op2) {
912 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
914 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
915 ir_node *memop, ir_node *op1, ir_node *op2) {
916 return new_rd_Div(NULL, irg, block, memop, op1, op2);
918 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
919 ir_node *memop, ir_node *op1, ir_node *op2) {
920 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
922 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
923 ir_node *op, ir_mode *mode) {
924 return new_rd_Abs(NULL, irg, block, op, mode);
926 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
927 ir_node *op1, ir_node *op2, ir_mode *mode) {
928 return new_rd_And(NULL, irg, block, op1, op2, mode);
930 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
931 ir_node *op1, ir_node *op2, ir_mode *mode) {
932 return new_rd_Or(NULL, irg, block, op1, op2, mode);
934 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
935 ir_node *op1, ir_node *op2, ir_mode *mode) {
936 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
938 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
939 ir_node *op, ir_mode *mode) {
940 return new_rd_Not(NULL, irg, block, op, mode);
942 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
943 ir_node *op1, ir_node *op2) {
944 return new_rd_Cmp(NULL, irg, block, op1, op2);
946 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
947 ir_node *op, ir_node *k, ir_mode *mode) {
948 return new_rd_Shl(NULL, irg, block, op, k, mode);
950 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
951 ir_node *op, ir_node *k, ir_mode *mode) {
952 return new_rd_Shr(NULL, irg, block, op, k, mode);
954 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
955 ir_node *op, ir_node *k, ir_mode *mode) {
956 return new_rd_Shrs(NULL, irg, block, op, k, mode);
958 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
959 ir_node *op, ir_node *k, ir_mode *mode) {
960 return new_rd_Rot(NULL, irg, block, op, k, mode);
962 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
963 ir_node *op, ir_mode *mode) {
964 return new_rd_Conv(NULL, irg, block, op, mode);
966 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
967 return new_rd_Cast(NULL, irg, block, op, to_tp);
969 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
970 ir_node **in, ir_mode *mode) {
971 return new_rd_Phi(NULL, irg, block, arity, in, mode);
973 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
974 ir_node *store, ir_node *adr) {
975 return new_rd_Load(NULL, irg, block, store, adr);
977 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
978 ir_node *store, ir_node *adr, ir_node *val) {
979 return new_rd_Store(NULL, irg, block, store, adr, val);
981 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
982 ir_node *size, type *alloc_type, where_alloc where) {
983 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
985 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
986 ir_node *ptr, ir_node *size, type *free_type) {
987 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
989 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
990 return new_rd_Sync(NULL, irg, block, arity, in);
992 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
993 ir_mode *mode, long proj) {
994 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
996 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
998 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1000 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1001 int arity, ir_node **in) {
1002 return new_rd_Tuple(NULL, irg, block, arity, in );
1004 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1005 ir_node *val, ir_mode *mode) {
1006 return new_rd_Id(NULL, irg, block, val, mode);
1008 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1009 return new_rd_Bad(irg);
1011 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1012 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1014 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1015 return new_rd_Unknown(irg, m);
1017 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1018 return new_rd_CallBegin(NULL, irg, block, callee);
1020 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1021 return new_rd_EndReg(NULL, irg, block);
1023 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1024 return new_rd_EndExcept(NULL, irg, block);
1026 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1027 return new_rd_Break(NULL, irg, block);
1029 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1030 ir_mode *mode, long proj) {
1031 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1033 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1034 ir_node *callee, int arity, ir_node **in,
1036 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1040 /** ********************/
1041 /** public interfaces */
1042 /** construction tools */
1046 * - create a new Start node in the current block
1048 * @return s - pointer to the created Start node
/* new_d_* constructors work on current_ir_graph / current_block. */
1053 new_d_Start (dbg_info* db)
1057 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1058 op_Start, mode_T, 0, NULL);
1059 /* res->attr.start.irg = current_ir_graph; */
1061 res = optimize_node (res);
1062 irn_vrfy_irg (res, current_ir_graph);
/* NOTE(review): unlike new_rd_End this variant runs optimize_node on
   the End node -- confirm the asymmetry is intended. */
1067 new_d_End (dbg_info* db)
1070 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1071 op_End, mode_X, -1, NULL);
1072 res = optimize_node (res);
1073 irn_vrfy_irg (res, current_ir_graph);
1078 /* Constructs a Block with a fixed number of predecessors.
1079 Does set current_block. Can be used with automatic Phi
1080 node construction. */
1082 new_d_Block (dbg_info* db, int arity, ir_node **in)
1086 bool has_unknown = false;
1088 res = new_rd_Block (db, current_ir_graph, arity, in);
1090 /* Create and initialize array for Phi-node construction. */
1091 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1092 current_ir_graph->n_loc);
1093 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized yet (the
   Unknowns may later be replaced); same policy as in new_rd_Phi. */
1095 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1097 if (!has_unknown) res = optimize_node (res);
1098 current_ir_graph->current_block = res;
1100 irn_vrfy_irg (res, current_ir_graph);
1105 /* ***********************************************************************/
1106 /* Methods necessary for automatic Phi node creation */
1108 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1109 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1110 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1111 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1113 Call Graph: ( A ---> B == A "calls" B)
1115 get_value mature_block
1123 get_r_value_internal |
1127 new_rd_Phi0 new_rd_Phi_in
1129 * *************************************************************************** */
1131 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi for a not-yet-matured block; completed (or turned
   into an Id) when the block is matured.  Not optimized. */
1132 static INLINE ir_node *
1133 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1136 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1137 irn_vrfy_irg (res, irg);
1141 /* There are two implementations of the Phi node construction. The first
1142 is faster, but does not work for blocks with more than 2 predecessors.
1143 The second works always but is slower and causes more unnecessary Phi
1145 Select the implementations by the following preprocessor flag set in
1147 #if USE_FAST_PHI_CONSTRUCTION
1149 /* This is a stack used for allocating and deallocating nodes in
1150 new_rd_Phi_in. The original implementation used the obstack
1151 to model this stack, now it is explicit. This reduces side effects.
1153 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack.
   NOTE(review): the malloc result is not visibly checked for NULL. */
1154 INLINE Phi_in_stack *
1155 new_Phi_in_stack(void) {
1158 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1160 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's flexible array (and the stack itself). */
1167 free_Phi_in_stack(Phi_in_stack *s) {
1168 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the per-graph stack for reuse:
   append if the stack is full, otherwise overwrite the free slot. */
1172 free_to_Phi_in_stack(ir_node *phi) {
1173 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1174 current_ir_graph->Phi_in_stack->pos)
1175 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1177 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1179 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: pops one from the stack for reuse if available,
   otherwise allocates a new one. */
1182 static INLINE ir_node *
1183 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1184 int arity, ir_node **in) {
1186 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1187 int pos = current_ir_graph->Phi_in_stack->pos;
1191 /* We need to allocate a new node */
/* NOTE(review): 'db' is not in this function's visible parameter
   list -- likely this should pass NULL (no debug info); confirm. */
1192 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1193 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1195 /* reuse the old node and initialize it again. */
1198 assert (res->kind == k_ir_node);
1199 assert (res->op == op_Phi);
1203 assert (arity >= 0);
1204 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is the block slot; the predecessors are copied after it. */
1205 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1207 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1209 (current_ir_graph->Phi_in_stack->pos)--;
1213 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1215 /* Creates a Phi node with a given, fixed array **in of predecessors.
1216 If the Phi node is unnecessary, as the same value reaches the block
1217 through all control flow paths, it is eliminated and the value
1218 returned directly. This constructor is only intended for use in
1219 the automatic Phi node generation triggered by get_value or mature.
1220 The implementation is quite tricky and depends on the fact, that
1221 the nodes are allocated on a stack:
1222 The in array contains predecessors and NULLs. The NULLs appear,
1223 if get_r_value_internal, that computed the predecessors, reached
1224 the same block on two paths. In this case the same value reaches
1225 this block on both paths, there is no definition in between. We need
1226 not allocate a Phi where these paths merge, but we have to communicate
1227 this fact to the caller. This happens by returning a pointer to the
1228 node the caller _will_ allocate. (Yes, we predict the address. We can
1229 do so because the nodes are allocated on the obstack.) The caller then
1230 finds a pointer to itself and, when this routine is called again,
1233 static INLINE ir_node *
1234 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1237 ir_node *res, *known;
1239 /* Allocate a new node on the obstack. This can return a node to
1240 which some of the pointers in the in-array already point.
1241 Attention: the constructor copies the in array, i.e., the later
1242 changes to the array in this routine do not affect the
1243 constructed node! If the in array contains NULLs, there will be
1244 missing predecessors in the returned node. Is this a possible
1245 internal state of the Phi node generation? */
1246 #if USE_EXPLICIT_PHI_IN_STACK
1247 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1249 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1250 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1253 /* The in-array can contain NULLs. These were returned by
1254 get_r_value_internal if it reached the same block/definition on a
1255 second path. The NULLs are replaced by the node itself to
1256 simplify the test in the next loop. */
1257 for (i = 0; i < ins; ++i) {
1262 /* This loop checks whether the Phi has more than one predecessor.
1263 If so, it is a real Phi node and we break the loop. Else the Phi
1264 node merges the same definition on several paths and therefore is
1266 for (i = 0; i < ins; ++i)
1268 if (in[i] == res || in[i] == known) continue;
1276 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Discard the unneeded node: recycle it via the explicit stack, or
   pop it off the obstack (it is the topmost allocation). */
1278 #if USE_EXPLICIT_PHI_IN_STACK
1279 free_to_Phi_in_stack(res);
1281 obstack_free (current_ir_graph->obst, res);
1285 res = optimize_node (res);
1286 irn_vrfy_irg (res, irg);
1289 /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: phi_merge and get_r_value_internal recurse
   mutually. */
1294 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1297 allocates and returns this node. The routine called to allocate the
1298 node might optimize it away and return a real value, or even a pointer
1299 to a deallocated Phi node on top of the obstack!
1300 This function is called with an in-array of proper size. **/
1302 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1304 ir_node *prevBlock, *res;
1307 /* This loop goes to all predecessor blocks of the block the Phi node is in
1308 and there finds the operands of the Phi node by calling
1309 get_r_value_internal. */
/* Note: block->in[0] is the block's own slot, so control flow
   predecessors are indexed 1..ins. */
1310 for (i = 1; i <= ins; ++i) {
1311 assert (block->in[i]);
1312 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1314 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1317 /* After collecting all predecessors into the array nin a new Phi node
1318 with these predecessors is created. This constructor contains an
1319 optimization: If all predecessors of the Phi node are identical it
1320 returns the only operand instead of a new Phi node. If the value
1321 passes two different control flow edges without being defined, and
1322 this is the second path treated, a pointer to the node that will be
1323 allocated for the first path (recursion) is returned. We already
1324 know the address of this node, as it is the next node to be allocated
1325 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1326 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1328 /* Now we know the value for "pos" and can enter it in the array with
1329 all known local variables. Attention: this might be a pointer to
1330 a node, that later will be allocated!!! See new_rd_Phi_in.
1331 If this is called in mature, after some set_value in the same block,
1332 the proper value must not be overwritten:
1334 get_value (makes Phi0, put's it into graph_arr)
1335 set_value (overwrites Phi0 in graph_arr)
1336 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1339 if (!block->attr.block.graph_arr[pos]) {
1340 block->attr.block.graph_arr[pos] = res;
1342 /* printf(" value already computed by %s\n",
1343 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1349 /* This function returns the last definition of a variable. In case
1350 this variable was last defined in a previous block, Phi nodes are
1351 inserted. If the part of the firm graph containing the definition
1352 is not yet constructed, a dummy Phi node is returned. */
/* get_r_value_internal: return the last definition of local variable `pos`
   valid in `block`, inserting Phi (or Phi0) nodes as needed. See the four
   cases documented below. Uses the irg visited flag to detect revisits. */
1354 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1357 /* There are 4 cases to treat.
1359 1. The block is not mature and we visit it the first time. We can not
1360 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1361 predecessors is returned. This node is added to the linked list (field
1362 "link") of the containing block to be completed when this block is
1363 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1366 2. The value is already known in this block, graph_arr[pos] is set and we
1367 visit the block the first time. We can return the value without
1368 creating any new nodes.
1370 3. The block is mature and we visit it the first time. A Phi node needs
1371 to be created (phi_merge). If the Phi is not needed, as all it's
1372 operands are the same value reaching the block through different
1373 paths, it's optimized away and the value itself is returned.
1375 4. The block is mature, and we visit it the second time. Now two
1376 subcases are possible:
1377 * The value was computed completely the last time we were here. This
1378 is the case if there is no loop. We can return the proper value.
1379 * The recursion that visited this node and set the flag did not
1380 return yet. We are computing a value in a loop and need to
1381 break the recursion without knowing the result yet.
1382 @@@ strange case. Straight forward we would create a Phi before
1383 starting the computation of it's predecessors. In this case we will
1384 find a Phi here in any case. The problem is that this implementation
1385 only creates a Phi after computing the predecessors, so that it is
1386 hard to compute self references of this Phi. @@@
1387 There is no simple check for the second subcase. Therefore we check
1388 for a second visit and treat all such cases as the second subcase.
1389 Anyways, the basic situation is the same: we reached a block
1390 on two paths without finding a definition of the value: No Phi
1391 nodes are needed on both paths.
1392 We return this information "Two paths, no Phi needed" by a very tricky
1393 implementation that relies on the fact that an obstack is a stack and
1394 will return a node with the same address on different allocations.
1395 Look also at phi_merge and new_rd_phi_in to understand this.
1396 @@@ Unfortunately this does not work, see testprogram
1397 three_cfpred_example.
1401 /* case 4 -- already visited. */
1402 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1404 /* visited the first time */
1405 set_irn_visited(block, get_irg_visited(current_ir_graph));
1407 /* Get the local valid value */
1408 res = block->attr.block.graph_arr[pos];
1410 /* case 2 -- If the value is actually computed, return it. */
1411 if (res) return res;
1413 if (block->attr.block.matured) { /* case 3 */
1415 /* The Phi has the same amount of ins as the corresponding block. */
1416 int ins = get_irn_arity(block);
1418 NEW_ARR_A (ir_node *, nin, ins);
1420 /* Phi merge collects the predecessors and then creates a node. */
1421 res = phi_merge (block, pos, mode, nin, ins);
1423 } else { /* case 1 */
1424 /* The block is not mature, we don't know how many in's are needed. A Phi
1425 with zero predecessors is created. Such a Phi node is called Phi0
1426 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1427 to the list of Phi0 nodes in this block to be matured by mature_block
1429 The Phi0 has to remember the pos of it's internal value. If the real
1430 Phi is computed, pos is used to update the array with the local
1433 res = new_rd_Phi0 (current_ir_graph, block, mode);
1434 res->attr.phi0_pos = pos;
1435 res->link = block->link;
1439 /* If we get here, the frontend missed a use-before-definition error */
1442 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1443 assert (mode->code >= irm_F && mode->code <= irm_P);
1444 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1445 tarval_mode_null[mode->code]);
1448 /* The local valid value is available now. */
1449 block->attr.block.graph_arr[pos] = res;
1457 it starts the recursion. This causes an Id at the entry of
1458 every block that has no definition of the value! **/
1460 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy no-op stubs for the explicit Phi-in stack interface.
   NOTE(review): these appear to be the disabled-stack variants -- the
   surrounding #if/#else structure is not fully visible in this listing. */
1462 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1463 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in: allocate a Phi node for `block` with operands `in`.
   If all non-Bad operands collapse to a single known value, the fresh node
   is freed again and that value is returned instead. `phi0` is the
   placeholder Phi0 this node will eventually replace (may be NULL). */
1466 static INLINE ir_node *
1467 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1468 ir_node **in, int ins, ir_node *phi0)
1471 ir_node *res, *known;
1473 /* Allocate a new node on the obstack. The allocation copies the in
1475 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1476 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1478 /* This loop checks whether the Phi has more than one predecessor.
1479 If so, it is a real Phi node and we break the loop. Else the
1480 Phi node merges the same definition on several paths and therefore
1481 is not needed. Don't consider Bad nodes! */
1483 for (i=0; i < ins; ++i)
1487 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1489 /* Optimize self referencing Phis: We can't detect them yet properly, as
1490 they still refer to the Phi0 they will replace. So replace right now. */
1491 if (phi0 && in[i] == phi0) in[i] = res;
1493 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1501 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* NOTE(review): obstack_free relies on `res` being the top-most allocation
   on the graph obstack -- safe only because nothing was allocated since. */
1504 obstack_free (current_ir_graph->obst, res);
1505 if (is_Phi(known)) {
1506 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1507 order, an enclosing Phi node may get superfluous. */
1508 res = optimize_in_place_2(known);
1509 if (res != known) { exchange(known, res); }
1514 /* A undefined value, e.g., in unreachable code. */
1518 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1519 irn_vrfy_irg (res, irg);
1520 /* Memory Phis in endless loops must be kept alive.
1521 As we can't distinguish these easily we keep all of them alive. */
1522 if ((res->op == op_Phi) && (mode == mode_M))
1523 add_End_keepalive(irg->end, res);
1530 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1532 #if PRECISE_EXC_CONTEXT
1534 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1536 /* Construct a new frag_array for node n.
1537 Copy the content from the current graph_arr of the corresponding block:
1538 this is the current state.
1539 Set ProjM(n) as current memory state.
1540 Further the last entry in frag_arr of current block points to n. This
1541 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1543 static INLINE ir_node ** new_frag_arr (ir_node *n)
1548 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1549 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1550 sizeof(ir_node *)*current_ir_graph->n_loc);
1552 /* turn off optimization before allocating Proj nodes, as res isn't
1554 opt = get_opt_optimize(); set_optimize(0);
1555 /* Here we rely on the fact that all frag ops have Memory as first result! */
1556 if (get_irn_op(n) == op_Call)
1557 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* All other fragile ops share the same memory Proj number; the asserts
   below pin that invariant at compile/run time. */
1559 assert((pn_Quot_M == pn_DivMod_M) &&
1560 (pn_Quot_M == pn_Div_M) &&
1561 (pn_Quot_M == pn_Mod_M) &&
1562 (pn_Quot_M == pn_Load_M) &&
1563 (pn_Quot_M == pn_Store_M) &&
1564 (pn_Quot_M == pn_Alloc_M) );
1565 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: last graph_arr slot of the current block points at this frag op. */
1569 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* get_frag_arr: return the fragile-op state array stored in n's attributes.
   The attribute slot differs per opcode (Call/Alloc/other fragile ops). */
1573 static INLINE ir_node **
1574 get_frag_arr (ir_node *n) {
1575 if (get_irn_op(n) == op_Call) {
1576 return n->attr.call.frag_arr;
1577 } else if (get_irn_op(n) == op_Alloc) {
1578 return n->attr.a.frag_arr;
1580 return n->attr.frag_arr;
/* set_frag_value: record `val` for variable `pos` in `frag_arr` unless a
   value is already present, then propagate along the frag-op chain (slot
   n_loc-1 links to the next fragile op's frag array).
   NOTE(review): two variants are visible below (recursive and a bounded
   loop with a 1000-iteration guard); an #if selecting between them is
   presumably lost in this sampled listing -- confirm against the full file. */
1585 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1587 if (!frag_arr[pos]) frag_arr[pos] = val;
1588 if (frag_arr[current_ir_graph->n_loc - 1]) {
1589 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1590 assert(arr != frag_arr && "Endless recursion detected");
1591 set_frag_value(arr, pos, val);
1596 for (i = 0; i < 1000; ++i) {
1597 if (!frag_arr[pos]) {
1598 frag_arr[pos] = val;
1600 if (frag_arr[current_ir_graph->n_loc - 1]) {
1601 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1607 assert(0 && "potential endless recursion");
/* get_r_frag_value_internal: like get_r_value_internal, but reads the value
   of `pos` as seen on the exceptional path after fragile op `cfOp`,
   consulting that op's frag array first. */
1612 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1616 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1618 frag_arr = get_frag_arr(cfOp);
1619 res = frag_arr[pos];
1621 if (block->attr.block.graph_arr[pos]) {
1622 /* There was a set_value after the cfOp and no get_value before that
1623 set_value. We must build a Phi node now. */
1624 if (block->attr.block.matured) {
1625 int ins = get_irn_arity(block);
1627 NEW_ARR_A (ir_node *, nin, ins);
1628 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, queued on block->link. */
1630 res = new_rd_Phi0 (current_ir_graph, block, mode);
1631 res->attr.phi0_pos = pos;
1632 res->link = block->link;
1636 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1637 but this should be better: (remove comment if this works) */
1638 /* It's a Phi, we can write this into all graph_arrs with NULL */
1639 set_frag_value(block->attr.block.graph_arr, pos, res);
1641 res = get_r_value_internal(block, pos, mode);
1642 set_frag_value(block->attr.block.graph_arr, pos, res);
1650 computes the predecessors for the real phi node, and then
1651 allocates and returns this node. The routine called to allocate the
1652 node might optimize it away and return a real value.
1653 This function must be called with an in-array of proper size. **/
/* phi_merge (PRECISE_EXC_CONTEXT variant): compute the Phi operands for
   `pos` over all predecessors of `block`, routing through frag arrays for
   fragile control-flow ops. A Phi0 is pre-allocated into graph_arr to break
   recursion, and exchanged for the real Phi at the end. */
1655 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1657 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1660 /* If this block has no value at pos create a Phi0 and remember it
1661 in graph_arr to break recursions.
1662 Else we may not set graph_arr as there a later value is remembered. */
1664 if (!block->attr.block.graph_arr[pos]) {
1665 if (block == get_irg_start_block(current_ir_graph)) {
1666 /* Collapsing to Bad tarvals is no good idea.
1667 So we call a user-supplied routine here that deals with this case as
1668 appropriate for the given language. Sadly, the only help we can give
1669 here is the position.
1671 Even if all variables are defined before use, it can happen that
1672 we get to the start block, if a cond has been replaced by a tuple
1673 (bad, jmp). In this case we call the function needlessly, eventually
1674 generating a non-existent error.
1675 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1678 if (default_initialize_local_variable)
1679 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1681 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1682 /* We don't need to care about exception ops in the start block.
1683 There are none by definition. */
1684 return block->attr.block.graph_arr[pos];
1686 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1687 block->attr.block.graph_arr[pos] = phi0;
1688 #if PRECISE_EXC_CONTEXT
1689 if (get_opt_precise_exc_context()) {
1690 /* Set graph_arr for fragile ops. Also here we should break recursion.
1691 We could choose a cyclic path through a cfop. But the recursion would
1692 break at some point. */
1693 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1699 /* This loop goes to all predecessor blocks of the block the Phi node
1700 is in and there finds the operands of the Phi node by calling
1701 get_r_value_internal. */
1702 for (i = 1; i <= ins; ++i) {
1703 prevCfOp = skip_Proj(block->in[i]);
1705 if (is_Bad(prevCfOp)) {
1706 /* In case a Cond has been optimized we would get right to the start block
1707 with an invalid definition. */
1708 nin[i-1] = new_Bad();
1711 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1713 if (!is_Bad(prevBlock)) {
1714 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: read the value from its frag array instead of
   the plain block state, so exception paths see the pre-op state. */
1715 if (get_opt_precise_exc_context() &&
1716 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1717 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1718 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1721 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1723 nin[i-1] = new_Bad();
1727 /* We want to pass the Phi0 node to the constructor: this finds additional
1728 optimization possibilities.
1729 The Phi0 node either is allocated in this function, or it comes from
1730 a former call to get_r_value_internal. In this case we may not yet
1731 exchange phi0, as this is done in mature_block. */
1733 phi0_all = block->attr.block.graph_arr[pos];
/* Only treat graph_arr[pos] as the Phi0 placeholder if it really is a
   zero-arity Phi belonging to this block. */
1734 if (!((get_irn_op(phi0_all) == op_Phi) &&
1735 (get_irn_arity(phi0_all) == 0) &&
1736 (get_nodes_block(phi0_all) == block)))
1742 /* After collecting all predecessors into the array nin a new Phi node
1743 with these predecessors is created. This constructor contains an
1744 optimization: If all predecessors of the Phi node are identical it
1745 returns the only operand instead of a new Phi node. */
1746 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1748 /* In case we allocated a Phi0 node at the beginning of this procedure,
1749 we need to exchange this Phi0 with the real Phi. */
1751 exchange(phi0, res);
1752 block->attr.block.graph_arr[pos] = res;
1753 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1754 only an optimization. */
1760 /* This function returns the last definition of a variable. In case
1761 this variable was last defined in a previous block, Phi nodes are
1762 inserted. If the part of the firm graph containing the definition
1763 is not yet constructed, a dummy Phi node is returned. */
/* get_r_value_internal (second variant): return the last definition of
   variable `pos` valid in `block`. Unlike the first variant, case 4 can
   always return graph_arr[pos] because phi_merge pre-allocates a Phi0. */
1765 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1768 /* There are 4 cases to treat.
1770 1. The block is not mature and we visit it the first time. We can not
1771 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1772 predecessors is returned. This node is added to the linked list (field
1773 "link") of the containing block to be completed when this block is
1774 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1777 2. The value is already known in this block, graph_arr[pos] is set and we
1778 visit the block the first time. We can return the value without
1779 creating any new nodes.
1781 3. The block is mature and we visit it the first time. A Phi node needs
1782 to be created (phi_merge). If the Phi is not needed, as all it's
1783 operands are the same value reaching the block through different
1784 paths, it's optimized away and the value itself is returned.
1786 4. The block is mature, and we visit it the second time. Now two
1787 subcases are possible:
1788 * The value was computed completely the last time we were here. This
1789 is the case if there is no loop. We can return the proper value.
1790 * The recursion that visited this node and set the flag did not
1791 return yet. We are computing a value in a loop and need to
1792 break the recursion. This case only happens if we visited
1793 the same block with phi_merge before, which inserted a Phi0.
1794 So we return the Phi0.
1797 /* case 4 -- already visited. */
1798 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1799 /* As phi_merge allocates a Phi0 this value is always defined. Here
1800 is the critical difference of the two algorithms. */
1801 assert(block->attr.block.graph_arr[pos]);
1802 return block->attr.block.graph_arr[pos];
1805 /* visited the first time */
1806 set_irn_visited(block, get_irg_visited(current_ir_graph));
1808 /* Get the local valid value */
1809 res = block->attr.block.graph_arr[pos];
1811 /* case 2 -- If the value is actually computed, return it. */
1812 if (res) { return res; };
1814 if (block->attr.block.matured) { /* case 3 */
1816 /* The Phi has the same amount of ins as the corresponding block. */
1817 int ins = get_irn_arity(block);
1819 NEW_ARR_A (ir_node *, nin, ins);
1821 /* Phi merge collects the predecessors and then creates a node. */
1822 res = phi_merge (block, pos, mode, nin, ins);
1824 } else { /* case 1 */
1825 /* The block is not mature, we don't know how many in's are needed. A Phi
1826 with zero predecessors is created. Such a Phi node is called Phi0
1827 node. The Phi0 is then added to the list of Phi0 nodes in this block
1828 to be matured by mature_block later.
1829 The Phi0 has to remember the pos of it's internal value. If the real
1830 Phi is computed, pos is used to update the array with the local
1832 res = new_rd_Phi0 (current_ir_graph, block, mode);
1833 res->attr.phi0_pos = pos;
1834 res->link = block->link;
1838 /* If we get here, the frontend missed a use-before-definition error */
1841 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1842 assert (mode->code >= irm_F && mode->code <= irm_P);
1843 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1844 get_mode_null(mode));
1847 /* The local valid value is available now. */
1848 block->attr.block.graph_arr[pos] = res;
1853 #endif /* USE_FAST_PHI_CONSTRUCTION */
1855 /* ************************************************************************** */
1857 /** Finalize a Block node, when all control flows are known. */
1858 /** Acceptable parameters are only Block nodes. */
/* mature_block: declare the block's predecessor list final. Fixes the
   backedge array, upgrades every queued Phi0 (chained via block->link)
   into a real Phi via phi_merge, and finally optimizes the block in place. */
1860 mature_block (ir_node *block)
1867 assert (get_irn_opcode(block) == iro_Block);
1868 /* @@@ should be commented in
1869 assert (!get_Block_matured(block) && "Block already matured"); */
1871 if (!get_Block_matured(block)) {
1872 ins = ARR_LEN (block->in)-1;
1873 /* Fix block parameters */
1874 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1876 /* An array for building the Phi nodes. */
1877 NEW_ARR_A (ir_node *, nin, ins);
1879 /* Traverse a chain of Phi nodes attached to this block and mature
1881 for (n = block->link; n; n=next) {
1882 inc_irg_visited(current_ir_graph);
1884 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1887 block->attr.block.matured = 1;
1889 /* Now, as the block is a finished firm node, we can optimize it.
1890 Since other nodes have been allocated since the block was created
1891 we can not free the node on the obstack. Therefore we have to call
1893 Unfortunately the optimization does not change a lot, as all allocated
1894 nodes refer to the unoptimized node.
1895 We can call _2, as global cse has no effect on blocks. */
1896 block = optimize_in_place_2(block);
1897 irn_vrfy_irg(block, current_ir_graph);
/* ---- new_d_* constructors ----
   Thin wrappers that forward to the corresponding new_rd_* constructor
   using current_ir_graph and its current block (constants and SymConsts go
   into the start block). Fragile ops (Quot, DivMod, Div, Mod, Call, Load,
   Store, Alloc) additionally allocate a frag array for precise exception
   context via allocate_frag_arr. */
1902 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1904 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1909 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1911 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1916 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1918 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1924 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1926 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1931 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1933 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1938 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1941 assert(arg->op == op_Cond);
1942 arg->attr.c.kind = fragmentary;
1943 arg->attr.c.default_proj = max_proj;
1944 res = new_Proj (arg, mode_X, max_proj);
1949 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1951 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1956 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1958 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1962 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1964 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1969 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1971 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1976 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1978 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1984 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1986 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1991 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1993 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1998 * allocate the frag array
2000 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2001 if (get_opt_precise_exc_context()) {
2002 if ((current_ir_graph->phase_state == phase_building) &&
2003 (get_irn_op(res) == op) && /* Could be optimized away. */
2004 !*frag_store) /* Could be a cse where the arr is already set. */ {
2005 *frag_store = new_frag_arr(res);
2012 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2015 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2017 #if PRECISE_EXC_CONTEXT
2018 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2025 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2028 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2030 #if PRECISE_EXC_CONTEXT
2031 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2038 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2041 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2043 #if PRECISE_EXC_CONTEXT
2044 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2051 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2054 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2056 #if PRECISE_EXC_CONTEXT
2057 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
2064 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2066 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2071 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2073 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2078 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2080 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2085 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2087 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2092 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2094 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2099 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2101 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2106 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2108 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2113 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2115 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2120 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2122 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2127 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2129 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2134 new_d_Jmp (dbg_info* db)
2136 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2140 new_d_Cond (dbg_info* db, ir_node *c)
2142 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2146 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2150 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2151 store, callee, arity, in, tp);
2152 #if PRECISE_EXC_CONTEXT
2153 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
2160 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2162 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2167 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2169 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2174 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2177 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2179 #if PRECISE_EXC_CONTEXT
2180 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2187 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2190 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2192 #if PRECISE_EXC_CONTEXT
2193 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2200 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2204 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2205 store, size, alloc_type, where);
2206 #if PRECISE_EXC_CONTEXT
2207 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2214 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2216 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2217 store, ptr, size, free_type);
2221 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2222 /* GL: objptr was called frame before. Frame was a bad choice for the name
2223 as the operand could as well be a pointer to a dynamic object. */
2225 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2226 store, objptr, 0, NULL, ent);
2230 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2232 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2233 store, objptr, n_index, index, sel);
2237 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2239 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2240 store, objptr, ent));
2244 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2246 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2251 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2253 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2258 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2260 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2268 return current_ir_graph->bad;
2272 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2274 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2279 new_d_Unknown (ir_mode *m)
2281 return new_rd_Unknown(current_ir_graph, m);
2285 new_d_CallBegin (dbg_info *db, ir_node *call)
2288 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2293 new_d_EndReg (dbg_info *db)
2296 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2301 new_d_EndExcept (dbg_info *db)
2304 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2309 new_d_Break (dbg_info *db)
2311 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2315 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2317 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2322 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2326 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2327 callee, arity, in, tp);
2332 /* ********************************************************************* */
2333 /* Comfortable interface with automatic Phi node construction. */
2334 /* (Uses also constructors of ?? interface, except new_Block. */
2335 /* ********************************************************************* */
2337 /* * Block construction **/
2338 /* immature Block without predecessors */
/* new_d_immBlock: create an immature block (predecessor count unknown, so a
   dynamic in-array is used), make it the current block, and set up the
   zeroed graph_arr used for automatic Phi construction. */
2339 ir_node *new_d_immBlock (dbg_info* db) {
2342 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2343 /* creates a new dynamic in-array as length of in is -1 */
2344 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2345 current_ir_graph->current_block = res;
2346 res->attr.block.matured = 0;
2347 /* res->attr.block.exc = exc_normal; */
2348 /* res->attr.block.handler_entry = 0; */
2349 res->attr.block.irg = current_ir_graph;
2350 res->attr.block.backedge = NULL;
2351 res->attr.block.in_cg = NULL;
2352 res->attr.block.cg_backedge = NULL;
2353 set_Block_block_visited(res, 0);
2355 /* Create and initialize array for Phi-node construction. */
2356 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2357 current_ir_graph->n_loc);
2358 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2360 /* Immature block may not be optimized! */
2361 irn_vrfy_irg (res, current_ir_graph);
2368 return new_d_immBlock(NULL);
2371 /* add an edge to a jmp/control flow node */
2373 add_in_edge (ir_node *block, ir_node *jmp)
/* Predecessors may only be appended while the block is still immature. */
2375 if (block->attr.block.matured) {
2376 assert(0 && "Error: Block already matured!\n");
2379 assert (jmp != NULL);
2380 ARR_APP1 (ir_node *, block->in, jmp);
2384 /* changing the current block */
2386 switch_block (ir_node *target)
2388 current_ir_graph->current_block = target;
2391 /* ************************ */
2392 /* parameter administration */
2394 /* get a value from the parameter array from the current block by its index */
2396 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2398 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2399 inc_irg_visited(current_ir_graph);
2401 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2403 /* get a value from the parameter array from the current block by its index */
2405 get_value (int pos, ir_mode *mode)
2407 return get_d_value(NULL, pos, mode);
2410 /* set a value at position pos in the parameter array from the current block */
2412 set_value (int pos, ir_node *value)
2414 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2415 assert(pos+1 < current_ir_graph->n_loc);
2416 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2419 /* get the current store */
2423 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2424 /* GL: one could call get_value instead */
2425 inc_irg_visited(current_ir_graph);
2426 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
/* set the current store */
/* Stores the memory state in slot 0 of the current block's value array. */
void
set_store (ir_node *store)
{
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
/* Mark 'ka' as kept alive: registers it at the End node so later
   optimizations do not remove it even if it becomes unreachable. */
void
keep_alive (ir_node *ka)
{
  add_End_keepalive(current_ir_graph->end, ka);
}
2444 /** Useful access routines **/
/* Returns the current block of the current graph. To set the current
   block use switch_block(). */
ir_node *get_cur_block() {
  return get_irg_current_block(current_ir_graph);
}
/* Returns the frame type of the current graph */
type *get_cur_frame_type() {
  return get_irg_frame_type(current_ir_graph);
}
2457 /* ********************************************************************* */
/* call once for each run of the library */
/* Installs the callback that supplies an initial value for locals read
   before they were set during SSA construction. */
void
init_cons (default_initialize_local_variable_func_t *func)
{
  default_initialize_local_variable = func;
}
/* call for each graph */
/* Marks graph construction as finished; no further SSA building. */
void
finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}
/* Convenience constructors without debug information: each simply forwards
   to its new_d_* counterpart with a NULL dbg_info.  Control flow nodes. */
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
/* Debug-info-free wrappers: constants, entity selection and calls.
   NOTE(review): the trailing parameters of new_Sel/new_Call were elided in
   the listing and reconstructed from the forwarded arguments — confirm
   against ircons.h. */
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
/* Debug-info-free wrappers: arithmetic, logic, shift, compare and
   conversion nodes.  All forward to the new_d_* constructors. */
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
/* Alias kept for callers using the long spelling. */
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
/* Debug-info-free wrappers: Phi, memory operations, Proj/Tuple/Id.
   NOTE(review): the trailing 'free_type' parameter of new_Free was elided
   in the listing and reconstructed from the forwarded call — confirm
   against ircons.h. */
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr) {
  return new_d_Load(NULL, store, addr);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   type *free_type) {
  return new_d_Free(NULL, store, ptr, size, free_type);
}
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
/* Debug-info-free wrappers: Bad/Confirm/Unknown and the interprocedural
   view nodes.  NOTE(review): the body of new_Bad was elided in the
   listing; reconstructed as a forward to new_d_Bad() following the
   pattern of every sibling — confirm against ircons.h. */
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
  return new_d_FuncCall(NULL, callee, arity, in, tp);
}