3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Block nodes carry no block of their own: pass NULL as the block argument. */
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A block built with an explicit predecessor array is complete, hence mature. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
/* Exception-handling attributes are currently disabled. */
63 /* res->attr.block.exc = exc_normal; */
64 /* res->attr.block.handler_entry = 0; */
65 res->attr.block.irg = irg;
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call-graph) predecessor info starts out unset. */
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
/* Constructs a Start node: the tuple (mode_T) of initial values of a graph.
   Has no predecessors. */
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 /* res->attr.start.irg = irg; */
82 irn_vrfy_irg (res, irg);
/* Constructs an End node. Arity -1 requests a dynamic predecessor array,
   so keep-alive edges can be appended later (see add_End_keepalive). */
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, the Phi's arity must match the block's. */
108 if ( get_Block_matured(block) )
109 assert( get_irn_arity(block) == arity );
111 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
113 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the predecessors for Unknown nodes; if any is present, skip
   local optimization of this Phi below. */
115 for (i = arity-1; i >= 0; i--)
116 if (get_irn_op(in[i]) == op_Unknown) {
121 if (!has_unknown) res = optimize_node (res);
122 irn_vrfy_irg (res, irg);
124 /* Memory Phis in endless loops must be kept alive.
125 As we can't distinguish these easily we keep all of them alive. */
126 if ((res->op == op_Phi) && (mode == mode_M))
127 add_End_keepalive(irg->end, res);
/* Constructs a Const node with an explicit type. NOTE: the block argument
   is deliberately ignored; constants are always placed in the start block. */
132 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
135 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
136 res->attr.con.tv = con;
137 set_Const_type(res, tp); /* Call method because of complex assertion. */
138 res = optimize_node (res);
/* optimize_node may have CSE'd res; the replacement must carry the same type. */
139 assert(get_Const_type(res) == tp);
140 irn_vrfy_irg (res, irg);
/* Constructs a Const node, deriving the type from the tarval: entity
   constants get a pointer-to-entity-type, everything else unknown_type. */
146 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
148 type *tp = unknown_type;
149 /* removing this somehow causes errors in jack. */
150 if (tarval_is_entity(con))
151 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
153 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node: a transparent forwarder to val. */
157 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
162 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
163 res = optimize_node (res);
164 irn_vrfy_irg (res, irg);
/* Constructs a Proj node selecting result number proj from the tuple arg. */
169 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
175 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
176 res->attr.proj = proj;
179 assert(get_Proj_pred(res));
180 assert(get_nodes_Block(get_Proj_pred(res)));
182 res = optimize_node (res);
184 irn_vrfy_irg (res, irg);
/* Constructs the default Proj of a Cond. NOTE: this mutates the Cond
   argument, marking it fragmentary and recording the default proj number. */
190 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
194 assert(arg->op == op_Cond);
195 arg->attr.c.kind = fragmentary;
196 arg->attr.c.default_proj = max_proj;
197 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts op to the given mode. */
202 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
207 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
208 res = optimize_node (res);
209 irn_vrfy_irg (res, irg);
/* Constructs a Cast node: changes only the type to to_tp, the mode of the
   operand is kept unchanged. */
214 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
217 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
218 res->attr.cast.totype = to_tp;
219 res = optimize_node (res);
220 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node (mode_T) grouping arity values. */
225 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
229 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
230 res = optimize_node (res);
231 irn_vrfy_irg (res, irg);
/* Arithmetic and logic constructors. All follow the same scheme:
   build the node, run local optimization (optimize_node), verify.
   The division-like ops (Quot, DivMod, Div, Mod) take a memory operand
   and yield a mode_T tuple; Cmp yields a mode_T tuple of relation Projs. */
236 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
237 ir_node *op1, ir_node *op2, ir_mode *mode)
243 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
244 res = optimize_node (res);
245 irn_vrfy_irg (res, irg);
250 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
251 ir_node *op1, ir_node *op2, ir_mode *mode)
257 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
258 res = optimize_node (res);
259 irn_vrfy_irg (res, irg);
264 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
265 ir_node *op, ir_mode *mode)
270 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
271 res = optimize_node (res);
272 irn_vrfy_irg (res, irg);
277 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
278 ir_node *op1, ir_node *op2, ir_mode *mode)
284 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
285 res = optimize_node (res);
286 irn_vrfy_irg (res, irg);
/* Quot/DivMod/Div/Mod: 3 inputs (memory, op1, op2), result is a tuple. */
291 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
292 ir_node *memop, ir_node *op1, ir_node *op2)
299 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
300 res = optimize_node (res);
301 irn_vrfy_irg (res, irg);
306 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
307 ir_node *memop, ir_node *op1, ir_node *op2)
314 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
315 res = optimize_node (res);
316 irn_vrfy_irg (res, irg);
321 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
322 ir_node *memop, ir_node *op1, ir_node *op2)
329 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
330 res = optimize_node (res);
331 irn_vrfy_irg (res, irg);
336 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
337 ir_node *memop, ir_node *op1, ir_node *op2)
344 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
345 res = optimize_node (res);
346 irn_vrfy_irg (res, irg);
351 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
352 ir_node *op1, ir_node *op2, ir_mode *mode)
358 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
359 res = optimize_node (res);
360 irn_vrfy_irg (res, irg);
365 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
366 ir_node *op1, ir_node *op2, ir_mode *mode)
372 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
373 res = optimize_node (res);
374 irn_vrfy_irg (res, irg);
379 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
387 res = optimize_node (res);
388 irn_vrfy_irg (res, irg);
393 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op, ir_mode *mode)
399 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
400 res = optimize_node (res);
401 irn_vrfy_irg (res, irg);
/* Shift/rotate: second operand k is the shift amount. */
406 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
407 ir_node *op, ir_node *k, ir_mode *mode)
413 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
414 res = optimize_node (res);
415 irn_vrfy_irg (res, irg);
420 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
421 ir_node *op, ir_node *k, ir_mode *mode)
427 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
428 res = optimize_node (res);
429 irn_vrfy_irg (res, irg);
434 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
441 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
442 res = optimize_node (res);
443 irn_vrfy_irg (res, irg);
448 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
449 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
456 res = optimize_node (res);
457 irn_vrfy_irg (res, irg);
462 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_mode *mode)
468 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
469 res = optimize_node (res);
470 irn_vrfy_irg (res, irg);
475 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
476 ir_node *op1, ir_node *op2)
482 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
483 res = optimize_node (res);
484 irn_vrfy_irg (res, irg);
/* Constructs an unconditional Jmp (mode_X control flow). */
489 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
492 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
493 res = optimize_node (res);
494 irn_vrfy_irg (res, irg);
/* Constructs a Cond node on selector c. Starts as a dense switch
   (kind = dense) with default proj 0; new_rd_defaultProj may later
   switch it to fragmentary. */
499 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
504 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
505 res->attr.c.kind = dense;
506 res->attr.c.default_proj = 0;
507 res = optimize_node (res);
508 irn_vrfy_irg (res, irg);
/* Constructs a Call. The real in-array r_in holds the actual arguments
   starting at index 2; presumably r_in[0]/r_in[1] are store and callee
   (assignments elided in this excerpt) -- TODO confirm. */
513 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
514 ir_node *callee, int arity, ir_node **in, type *tp)
521 NEW_ARR_A (ir_node *, r_in, r_arity);
524 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
526 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type. */
528 assert(is_method_type(tp));
529 set_Call_type(res, tp);
/* No callee analysis information yet. */
530 res->attr.call.callee_arr = NULL;
531 res = optimize_node (res);
532 irn_vrfy_irg (res, irg);
/* Constructs a Return: results are copied behind the store (index 1). */
537 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
538 ir_node *store, int arity, ir_node **in)
545 NEW_ARR_A (ir_node *, r_in, r_arity);
547 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
548 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
549 res = optimize_node (res);
550 irn_vrfy_irg (res, irg);
/* Constructs a Raise with two inputs (store and exception object). */
555 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
561 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
562 res = optimize_node (res);
563 irn_vrfy_irg (res, irg);
/* Constructs a Load: inputs are store and address, result is a tuple. */
568 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
569 ir_node *store, ir_node *adr)
575 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
577 res = optimize_node (res);
578 irn_vrfy_irg (res, irg);
/* Constructs a Store: inputs are store, address and value. */
583 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
584 ir_node *store, ir_node *adr, ir_node *val)
591 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
593 res = optimize_node (res);
595 irn_vrfy_irg (res, irg);
/* Constructs an Alloc of alloc_type, on stack or heap per 'where'. */
600 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
601 ir_node *size, type *alloc_type, where_alloc where)
607 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
609 res->attr.a.where = where;
610 res->attr.a.type = alloc_type;
612 res = optimize_node (res);
613 irn_vrfy_irg (res, irg);
/* Constructs a Free of an object of free_type. */
618 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *ptr, ir_node *size, type *free_type)
626 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
628 res->attr.f = free_type;
630 res = optimize_node (res);
631 irn_vrfy_irg (res, irg);
/* Constructs a Sel: selects entity ent from the compound/array at objptr;
   'in' carries the array indices, copied behind store/objptr (index 2). */
636 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
637 int arity, ir_node **in, entity *ent)
643 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
646 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
649 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
650 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
652 res->attr.s.ent = ent;
654 res = optimize_node (res);
655 irn_vrfy_irg (res, irg);
/* Constructs an InstOf (runtime type test). */
660 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
661 ir_node *objptr, type *ent)
668 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): the node is built with op_Sel although this is the InstOf
   constructor and the attribute written below is attr.io -- looks like a
   copy-paste from new_rd_Sel; op_InstOf seems intended. Confirm against
   upstream libFirm before changing. */
672 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
674 res->attr.io.ent = ent;
/* Local optimization and verification are deliberately disabled here. */
676 /* res = optimize (res);
677 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst. The union attr.i.sym is filled according to
   symkind: an ident for addr_name, an entity for addr_ent, and a type
   for type_tag/size. The mode choice for address kinds is elided here. */
682 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
683 symconst_kind symkind)
687 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
691 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
693 res->attr.i.num = symkind;
694 res->attr.i.sym = value;
696 if (symkind == symconst_addr_name) {
697 res->attr.i.sym.ident_p = (ident *)value;
698 } else if (symkind == symconst_addr_ent) {
699 res->attr.i.sym.entity = (entity *)value;
/* Remaining kinds must be type_tag or size and carry a type. */
701 assert ( ( (symkind ==symconst_type_tag)
702 || (symkind == symconst_size))
703 && (is_type(value)));
704 res->attr.i.sym.typ = (type *)value;
707 res = optimize_node (res);
708 irn_vrfy_irg (res, irg);
/* Constructs a Sync: joins several memory values (mode_M). */
713 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
717 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
719 res = optimize_node (res);
720 irn_vrfy_irg (res, irg);
/* Returns the graph's Bad node (body elided in this excerpt). */
725 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts that 'val' cmp 'bound' holds from here on.
   The mode is inherited from val. */
731 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
733 ir_node *in[2], *res;
737 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
739 res->attr.confirm_cmp = cmp;
741 res = optimize_node (res);
742 irn_vrfy_irg(res, irg);
/* Constructs an Unknown of mode m. Always placed in the start block,
   without debug info; neither optimized nor verified. */
747 new_rd_Unknown (ir_graph *irg, ir_mode *m)
749 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin for interprocedural view: single input is the
   call's function pointer; remembers the corresponding Call node. */
753 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
757 in[0] = get_Call_ptr(call);
758 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
759 /* res->attr.callbegin.irg = irg; */
760 res->attr.callbegin.call = call;
761 res = optimize_node (res);
762 irn_vrfy_irg (res, irg);
/* Constructs an EndReg (interprocedural regular end); dynamic arity. */
767 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
771 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
774 irn_vrfy_irg (res, irg);
/* Constructs an EndExcept (interprocedural exceptional end); dynamic
   arity; registered in the graph as irg->end_except. */
779 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
783 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
784 irg->end_except = res;
786 irn_vrfy_irg (res, irg);
/* Constructs a Break: control flow jump for the interprocedural view. */
791 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
794 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
795 res = optimize_node (res);
796 irn_vrfy_irg (res, irg);
/* Constructs a Filter: the interprocedural counterpart of Proj. */
801 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
807 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
808 res->attr.filter.proj = proj;
/* Interprocedural inputs and backedge info start out unset. */
809 res->attr.filter.in_cg = NULL;
810 res->attr.filter.backedge = NULL;
813 assert(get_Proj_pred(res));
814 assert(get_nodes_Block(get_Proj_pred(res)));
816 res = optimize_node (res);
818 irn_vrfy_irg (res, irg);
/* Constructs a FuncCall: a call without memory/store dependency.
   Arguments are copied behind index 1; presumably r_in[0] is the callee
   (assignment elided in this excerpt) -- TODO confirm. */
824 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
825 ir_node *callee, int arity, ir_node **in, type *tp)
832 NEW_ARR_A (ir_node *, r_in, r_arity);
834 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
836 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
838 assert(is_method_type(tp));
839 set_FuncCall_type(res, tp);
840 res->attr.call.callee_arr = NULL;
841 res = optimize_node (res);
842 irn_vrfy_irg (res, irg);
/* Convenience wrappers: each new_r_* constructor simply forwards to the
   corresponding new_rd_* constructor with NULL debug info. */
847 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
848 return new_rd_Block(NULL, irg, arity, in);
850 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
851 return new_rd_Start(NULL, irg, block);
853 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
854 return new_rd_End(NULL, irg, block);
856 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
857 return new_rd_Jmp(NULL, irg, block);
859 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
860 return new_rd_Cond(NULL, irg, block, c);
862 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
863 ir_node *store, int arity, ir_node **in) {
864 return new_rd_Return(NULL, irg, block, store, arity, in);
866 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
867 ir_node *store, ir_node *obj) {
868 return new_rd_Raise(NULL, irg, block, store, obj);
870 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
871 ir_mode *mode, tarval *con) {
872 return new_rd_Const(NULL, irg, block, mode, con);
874 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
875 symconst_symbol value, symconst_kind symkind) {
876 return new_rd_SymConst(NULL, irg, block, value, symkind);
878 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
879 ir_node *objptr, int n_index, ir_node **index,
881 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
883 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
885 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
887 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
888 ir_node *callee, int arity, ir_node **in,
890 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
892 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
893 ir_node *op1, ir_node *op2, ir_mode *mode) {
894 return new_rd_Add(NULL, irg, block, op1, op2, mode);
896 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
897 ir_node *op1, ir_node *op2, ir_mode *mode) {
898 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
900 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
901 ir_node *op, ir_mode *mode) {
902 return new_rd_Minus(NULL, irg, block, op, mode);
904 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
905 ir_node *op1, ir_node *op2, ir_mode *mode) {
906 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
908 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
909 ir_node *memop, ir_node *op1, ir_node *op2) {
910 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
912 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
913 ir_node *memop, ir_node *op1, ir_node *op2) {
914 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
916 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
917 ir_node *memop, ir_node *op1, ir_node *op2) {
918 return new_rd_Div(NULL, irg, block, memop, op1, op2);
920 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
921 ir_node *memop, ir_node *op1, ir_node *op2) {
922 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
924 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
925 ir_node *op, ir_mode *mode) {
926 return new_rd_Abs(NULL, irg, block, op, mode);
928 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
929 ir_node *op1, ir_node *op2, ir_mode *mode) {
930 return new_rd_And(NULL, irg, block, op1, op2, mode);
932 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
933 ir_node *op1, ir_node *op2, ir_mode *mode) {
934 return new_rd_Or(NULL, irg, block, op1, op2, mode);
936 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
937 ir_node *op1, ir_node *op2, ir_mode *mode) {
938 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
940 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
941 ir_node *op, ir_mode *mode) {
942 return new_rd_Not(NULL, irg, block, op, mode);
944 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
945 ir_node *op1, ir_node *op2) {
946 return new_rd_Cmp(NULL, irg, block, op1, op2);
948 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
949 ir_node *op, ir_node *k, ir_mode *mode) {
950 return new_rd_Shl(NULL, irg, block, op, k, mode);
952 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
953 ir_node *op, ir_node *k, ir_mode *mode) {
954 return new_rd_Shr(NULL, irg, block, op, k, mode);
956 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_node *k, ir_mode *mode) {
958 return new_rd_Shrs(NULL, irg, block, op, k, mode);
960 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
961 ir_node *op, ir_node *k, ir_mode *mode) {
962 return new_rd_Rot(NULL, irg, block, op, k, mode);
964 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
965 ir_node *op, ir_mode *mode) {
966 return new_rd_Conv(NULL, irg, block, op, mode);
968 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
969 return new_rd_Cast(NULL, irg, block, op, to_tp);
971 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
972 ir_node **in, ir_mode *mode) {
973 return new_rd_Phi(NULL, irg, block, arity, in, mode);
975 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
976 ir_node *store, ir_node *adr) {
977 return new_rd_Load(NULL, irg, block, store, adr);
979 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
980 ir_node *store, ir_node *adr, ir_node *val) {
981 return new_rd_Store(NULL, irg, block, store, adr, val);
983 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
984 ir_node *size, type *alloc_type, where_alloc where) {
985 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
987 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
988 ir_node *ptr, ir_node *size, type *free_type) {
989 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
991 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
992 return new_rd_Sync(NULL, irg, block, arity, in);
994 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
995 ir_mode *mode, long proj) {
996 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
998 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1000 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1002 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1003 int arity, ir_node **in) {
1004 return new_rd_Tuple(NULL, irg, block, arity, in );
1006 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1007 ir_node *val, ir_mode *mode) {
1008 return new_rd_Id(NULL, irg, block, val, mode);
1010 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1011 return new_rd_Bad(irg);
1013 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1014 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1016 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1017 return new_rd_Unknown(irg, m);
1019 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1020 return new_rd_CallBegin(NULL, irg, block, callee);
1022 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1023 return new_rd_EndReg(NULL, irg, block);
1025 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1026 return new_rd_EndExcept(NULL, irg, block);
1028 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1029 return new_rd_Break(NULL, irg, block);
1031 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1032 ir_mode *mode, long proj) {
1033 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1035 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1036 ir_node *callee, int arity, ir_node **in,
1038 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1042 /** ********************/
1043 /** public interfaces */
1044 /** construction tools */
1048 * - create a new Start node in the current block
1050 * @return s - pointer to the created Start node
1055 new_d_Start (dbg_info* db)
/* Uses the global construction state: current_ir_graph / current_block. */
1059 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1060 op_Start, mode_T, 0, NULL);
1061 /* res->attr.start.irg = current_ir_graph; */
1063 res = optimize_node (res);
1064 irn_vrfy_irg (res, current_ir_graph);
/* Creates an End node in the current block of current_ir_graph;
   arity -1 gives it a dynamic predecessor array for keep-alives. */
1069 new_d_End (dbg_info* db)
1072 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1073 op_End, mode_X, -1, NULL);
1074 res = optimize_node (res);
1075 irn_vrfy_irg (res, current_ir_graph);
1080 /* Constructs a Block with a fixed number of predecessors.
1081 Does set current_block. Can be used with automatic Phi
1082 node construction. */
1084 new_d_Block (dbg_info* db, int arity, ir_node **in)
1088 bool has_unknown = false;
1090 res = new_rd_Block (db, current_ir_graph, arity, in);
1092 /* Create and initialize array for Phi-node construction. */
1093 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1094 current_ir_graph->n_loc);
/* graph_arr maps local-variable positions to their current SSA value;
   start with all positions undefined (NULL). */
1095 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Do not optimize a block that has Unknown predecessors. */
1097 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1099 if (!has_unknown) res = optimize_node (res);
1100 current_ir_graph->current_block = res;
1102 irn_vrfy_irg (res, current_ir_graph);
1107 /* ***********************************************************************/
1108 /* Methods necessary for automatic Phi node creation */
1110 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1111 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1112 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1113 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1115 Call Graph: ( A ---> B == A "calls" B)
1117 get_value mature_block
1125 get_r_value_internal |
1129 new_rd_Phi0 new_rd_Phi_in
1131 * *************************************************************************** */
1133 /** Creates a Phi node with 0 predecessors */
1134 static INLINE ir_node *
1135 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
/* Phi0 is a placeholder completed when its block matures; it is only
   verified, never optimized here. */
1138 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1139 irn_vrfy_irg (res, irg);
1143 /* There are two implementations of the Phi node construction. The first
1144 is faster, but does not work for blocks with more than 2 predecessors.
1145 The second works always but is slower and causes more unnecessary Phi
1147 Select the implementations by the following preprocessor flag set in
1149 #if USE_FAST_PHI_CONSTRUCTION
1151 /* This is a stack used for allocating and deallocating nodes in
1152 new_rd_Phi_in. The original implementation used the obstack
1153 to model this stack, now it is explicit. This reduces side effects.
1155 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack (flexible array of node ptrs). */
1156 INLINE Phi_in_stack *
1157 new_Phi_in_stack(void) {
1160 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1162 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's array (the struct itself is freed elsewhere,
   presumably by the caller -- elided in this excerpt). */
1169 free_Phi_in_stack(Phi_in_stack *s) {
1170 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the reuse stack of current_ir_graph:
   append if the stack is full up to pos, else overwrite the free slot. */
1174 free_to_Phi_in_stack(ir_node *phi) {
1175 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1176 current_ir_graph->Phi_in_stack->pos)
1177 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1179 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1181 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: pops one from the reuse stack if available,
   otherwise allocates a new one. */
1184 static INLINE ir_node *
1185 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1186 int arity, ir_node **in) {
1188 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1189 int pos = current_ir_graph->Phi_in_stack->pos;
1193 /* We need to allocate a new node */
/* NOTE(review): 'db' is not a parameter of this function and no local
   declaration is visible; this looks like it should be NULL (no debug
   info for internally built Phis) -- confirm against upstream libFirm. */
1194 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1195 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1197 /* reuse the old node and initialize it again. */
1200 assert (res->kind == k_ir_node);
1201 assert (res->op == op_Phi);
1205 assert (arity >= 0);
1206 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved (predecessors start at index 1), hence arity+1. */
1207 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1209 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1211 (current_ir_graph->Phi_in_stack->pos)--;
1215 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1217 /* Creates a Phi node with a given, fixed array **in of predecessors.
1218 If the Phi node is unnecessary, as the same value reaches the block
1219 through all control flow paths, it is eliminated and the value
1220 returned directly. This constructor is only intended for use in
1221 the automatic Phi node generation triggered by get_value or mature.
1222 The implementation is quite tricky and depends on the fact, that
1223 the nodes are allocated on a stack:
1224 The in array contains predecessors and NULLs. The NULLs appear,
1225 if get_r_value_internal, that computed the predecessors, reached
1226 the same block on two paths. In this case the same value reaches
1227 this block on both paths, there is no definition in between. We need
1228 not allocate a Phi where these path's merge, but we have to communicate
1229 this fact to the caller. This happens by returning a pointer to the
1230 node the caller _will_ allocate. (Yes, we predict the address. We can
1231 do so because the nodes are allocated on the obstack.) The caller then
1232 finds a pointer to itself and, when this routine is called again,
1235 static INLINE ir_node *
1236 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1239 ir_node *res, *known;
1241 /* Allocate a new node on the obstack. This can return a node to
1242 which some of the pointers in the in-array already point.
1243 Attention: the constructor copies the in array, i.e., the later
1244 changes to the array in this routine do not affect the
1245 constructed node! If the in array contains NULLs, there will be
1246 missing predecessors in the returned node. Is this a possible
1247 internal state of the Phi node generation? */
1248 #if USE_EXPLICIT_PHI_IN_STACK
1249 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1251 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1252 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1255 /* The in-array can contain NULLs. These were returned by
1256 get_r_value_internal if it reached the same block/definition on a
1257 second path. The NULLs are replaced by the node itself to
1258 simplify the test in the next loop. */
1259 for (i = 0; i < ins; ++i) {
1264 /* This loop checks whether the Phi has more than one predecessor.
1265 If so, it is a real Phi node and we break the loop. Else the Phi
1266 node merges the same definition on several paths and therefore is
1268 for (i = 0; i < ins; ++i)
1270 if (in[i] == res || in[i] == known) continue;
1278 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Return the unneeded node to the reuse stack, or pop it off the obstack. */
1280 #if USE_EXPLICIT_PHI_IN_STACK
1281 free_to_Phi_in_stack(res);
1283 obstack_free (current_ir_graph->obst, res);
/* A real Phi: run local optimization and verify it. */
1287 res = optimize_node (res);
1288 irn_vrfy_irg (res, irg);
1291 /* return the pointer to the Phi node. This node might be deallocated! */
1296 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1299 allocates and returns this node. The routine called to allocate the
1300 node might optimize it away and return a real value, or even a pointer
1301 to a deallocated Phi node on top of the obstack!
1302 This function is called with an in-array of proper size. **/
1304 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1306 ir_node *prevBlock, *res;
1309 /* This loop goes to all predecessor blocks of the block the Phi node is in
1310 and there finds the operands of the Phi node by calling
1311 get_r_value_internal. */
/* Predecessor edges of a node are stored starting at in[1]; nin is 0-based. */
1312 for (i = 1; i <= ins; ++i) {
1313 assert (block->in[i]);
1314 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1316 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1319 /* After collecting all predecessors into the array nin a new Phi node
1320 with these predecessors is created. This constructor contains an
1321 optimization: If all predecessors of the Phi node are identical it
1322 returns the only operand instead of a new Phi node. If the value
1323 passes two different control flow edges without being defined, and
1324 this is the second path treated, a pointer to the node that will be
1325 allocated for the first path (recursion) is returned. We already
1326 know the address of this node, as it is the next node to be allocated
1327 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1328 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1330 /* Now we know the value for "pos" and can enter it in the array with
1331 all known local variables. Attention: this might be a pointer to
1332 a node, that later will be allocated!!! See new_rd_Phi_in.
1333 If this is called in mature, after some set_value in the same block,
1334 the proper value must not be overwritten:
1336 get_value (makes Phi0, puts it into graph_arr)
1337 set_value (overwrites Phi0 in graph_arr)
1338 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1341 if (!block->attr.block.graph_arr[pos]) {
1342 block->attr.block.graph_arr[pos] = res;
1344 /* printf(" value already computed by %s\n",
1345 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1351 /* This function returns the last definition of a variable. In case
1352 this variable was last defined in a previous block, Phi nodes are
1353 inserted. If the part of the firm graph containing the definition
1354 is not yet constructed, a dummy Phi node is returned. */
1356 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1359 /* There are 4 cases to treat.
1361 1. The block is not mature and we visit it the first time. We can not
1362 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1363 predecessors is returned. This node is added to the linked list (field
1364 "link") of the containing block to be completed when this block is
1365 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1368 2. The value is already known in this block, graph_arr[pos] is set and we
1369 visit the block the first time. We can return the value without
1370 creating any new nodes.
1372 3. The block is mature and we visit it the first time. A Phi node needs
1373 to be created (phi_merge). If the Phi is not needed, as all its
1374 operands are the same value reaching the block through different
1375 paths, it's optimized away and the value itself is returned.
1377 4. The block is mature, and we visit it the second time. Now two
1378 subcases are possible:
1379 * The value was computed completely the last time we were here. This
1380 is the case if there is no loop. We can return the proper value.
1381 * The recursion that visited this node and set the flag did not
1382 return yet. We are computing a value in a loop and need to
1383 break the recursion without knowing the result yet.
1384 @@@ strange case. Straight forward we would create a Phi before
1385 starting the computation of its predecessors. In this case we will
1386 find a Phi here in any case. The problem is that this implementation
1387 only creates a Phi after computing the predecessors, so that it is
1388 hard to compute self references of this Phi. @@@
1389 There is no simple check for the second subcase. Therefore we check
1390 for a second visit and treat all such cases as the second subcase.
1391 Anyways, the basic situation is the same: we reached a block
1392 on two paths without finding a definition of the value: No Phi
1393 nodes are needed on both paths.
1394 We return this information "Two paths, no Phi needed" by a very tricky
1395 implementation that relies on the fact that an obstack is a stack and
1396 will return a node with the same address on different allocations.
1397 Look also at phi_merge and new_rd_phi_in to understand this.
1398 @@@ Unfortunately this does not work, see testprogram
1399 three_cfpred_example.
1403 /* case 4 -- already visited. */
/* NULL here is the "reached twice, no Phi needed" signal described above. */
1404 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1406 /* visited the first time */
1407 set_irn_visited(block, get_irg_visited(current_ir_graph));
1409 /* Get the local valid value */
1410 res = block->attr.block.graph_arr[pos];
1412 /* case 2 -- If the value is actually computed, return it. */
1413 if (res) return res;
1415 if (block->attr.block.matured) { /* case 3 */
1417 /* The Phi has the same amount of ins as the corresponding block. */
1418 int ins = get_irn_arity(block);
1420 NEW_ARR_A (ir_node *, nin, ins);
1422 /* Phi merge collects the predecessors and then creates a node. */
1423 res = phi_merge (block, pos, mode, nin, ins);
1425 } else { /* case 1 */
1426 /* The block is not mature, we don't know how many in's are needed. A Phi
1427 with zero predecessors is created. Such a Phi node is called Phi0
1428 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1429 to the list of Phi0 nodes in this block to be matured by mature_block
1431 The Phi0 has to remember the pos of its internal value. If the real
1432 Phi is computed, pos is used to update the array with the local
1435 res = new_rd_Phi0 (current_ir_graph, block, mode);
1436 res->attr.phi0_pos = pos;
/* Chain the Phi0 into the block's pending-Phi list (field "link"). */
1437 res->link = block->link;
/* Fallback for a use-before-definition bug in the frontend: report and
   substitute a zero constant of the requested mode. */
1441 /* If we get here, the frontend missed a use-before-definition error */
1444 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1445 assert (mode->code >= irm_F && mode->code <= irm_P);
1446 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1447 tarval_mode_null[mode->code]);
1450 /* The local valid value is available now. */
1451 block->attr.block.graph_arr[pos] = res;
1459 it starts the recursion. This causes an Id at the entry of
1460 every block that has no definition of the value! **/
1462 #if USE_EXPLICIT_PHI_IN_STACK
/* No-op stubs: this variant does not maintain an explicit Phi-in stack.
   NOTE(review): preprocessor context is partially visible -- confirm which
   branch these stubs belong to. */
1464 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1465 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Build a Phi from `in[0..ins-1]` in `block`; `phi0` is the placeholder Phi0
   this Phi will replace (may be referenced by self-loops), or NULL. */
1468 static INLINE ir_node *
1469 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1470 ir_node **in, int ins, ir_node *phi0)
1473 ir_node *res, *known;
1475 /* Allocate a new node on the obstack. The allocation copies the in
1477 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1478 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1480 /* This loop checks whether the Phi has more than one predecessor.
1481 If so, it is a real Phi node and we break the loop. Else the
1482 Phi node merges the same definition on several paths and therefore
1483 is not needed. Don't consider Bad nodes! */
1485 for (i=0; i < ins; ++i)
1489 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1491 /* Optimize self referencing Phis: We can't detect them yet properly, as
1492 they still refer to the Phi0 they will replace. So replace right now. */
1493 if (phi0 && in[i] == phi0) in[i] = res;
1495 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1503 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The superfluous Phi was the topmost obstack allocation -- release it. */
1506 obstack_free (current_ir_graph->obst, res);
1507 if (is_Phi(known)) {
1508 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1509 order, an enclosing Phi node may get superfluous. */
1510 res = optimize_in_place_2(known);
1511 if (res != known) { exchange(known, res); }
1516 /* An undefined value, e.g., in unreachable code. */
1520 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1521 irn_vrfy_irg (res, irg);
1522 /* Memory Phis in endless loops must be kept alive.
1523 As we can't distinguish these easily we keep all of them alive. */
1524 if ((res->op == op_Phi) && (mode == mode_M))
1525 add_End_keepalive(irg->end, res);
1532 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1534 #if PRECISE_EXC_CONTEXT
1536 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1538 /* Construct a new frag_array for node n.
1539 Copy the content from the current graph_arr of the corresponding block:
1540 this is the current state.
1541 Set ProjM(n) as current memory state.
1542 Further the last entry in frag_arr of current block points to n. This
1543 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1545 static INLINE ir_node ** new_frag_arr (ir_node *n)
/* Snapshot the current block's value array (n_loc entries) for the fragile
   op n, so the exception path sees the state at the point of n. */
1550 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1551 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1552 sizeof(ir_node *)*current_ir_graph->n_loc);
1554 /* turn off optimization before allocating Proj nodes, as res isn't
1556 opt = get_opt_optimize(); set_optimize(0);
1557 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Calls use a distinct exception-memory Proj number; all other fragile ops
   share the same M Proj number (asserted below). */
1558 if (get_irn_op(n) == op_Call)
1559 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1561 assert((pn_Quot_M == pn_DivMod_M) &&
1562 (pn_Quot_M == pn_Div_M) &&
1563 (pn_Quot_M == pn_Mod_M) &&
1564 (pn_Quot_M == pn_Load_M) &&
1565 (pn_Quot_M == pn_Store_M) &&
1566 (pn_Quot_M == pn_Alloc_M) );
1567 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Last slot of the block's graph_arr links to the newest frag op (chain head). */
1571 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the frag_arr of a fragile op n. The array lives at a different
   attribute offset depending on the opcode, hence the dispatch. */
static INLINE ir_node **
1576 get_frag_arr (ir_node *n) {
1577 if (get_irn_op(n) == op_Call) {
1578 return n->attr.call.frag_arr;
1579 } else if (get_irn_op(n) == op_Alloc) {
1580 return n->attr.a.frag_arr;
1582 return n->attr.frag_arr;
/* Record val at pos in frag_arr (only if unset) and propagate along the
   chain of frag arrays linked through the last slot (n_loc-1).
   NOTE(review): two variants are visible (recursive, and an iterative one
   capped at 1000 steps) -- presumably selected by a preprocessor conditional
   not visible here; confirm. */
1587 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1589 if (!frag_arr[pos]) frag_arr[pos] = val;
1590 if (frag_arr[current_ir_graph->n_loc - 1]) {
1591 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1592 assert(arr != frag_arr && "Endless recursion detected");
1593 set_frag_value(arr, pos, val);
/* Iterative variant: walk the frag-op chain with a hard iteration cap. */
1598 for (i = 0; i < 1000; ++i) {
1599 if (!frag_arr[pos]) {
1600 frag_arr[pos] = val;
1602 if (frag_arr[current_ir_graph->n_loc - 1]) {
1603 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1609 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but reads the value valid at the fragile op
   cfOp (exception edge) rather than at the end of block. */
1614 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1618 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1620 frag_arr = get_frag_arr(cfOp);
1621 res = frag_arr[pos];
/* A value in graph_arr means a set_value happened after cfOp with no
   intervening get_value: build the Phi for the pre-cfOp state now. */
1623 if (block->attr.block.graph_arr[pos]) {
1624 /* There was a set_value after the cfOp and no get_value before that
1625 set_value. We must build a Phi node now. */
1626 if (block->attr.block.matured) {
1627 int ins = get_irn_arity(block);
1629 NEW_ARR_A (ir_node *, nin, ins);
1630 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, matured later. */
1632 res = new_rd_Phi0 (current_ir_graph, block, mode);
1633 res->attr.phi0_pos = pos;
1634 res->link = block->link;
1638 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1639 but this should be better: (remove comment if this works) */
1640 /* It's a Phi, we can write this into all graph_arrs with NULL */
1641 set_frag_value(block->attr.block.graph_arr, pos, res);
1643 res = get_r_value_internal(block, pos, mode);
1644 set_frag_value(block->attr.block.graph_arr, pos, res);
1652 computes the predecessors for the real phi node, and then
1653 allocates and returns this node. The routine called to allocate the
1654 node might optimize it away and return a real value.
1655 This function must be called with an in-array of proper size. **/
1657 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1659 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1662 /* If this block has no value at pos create a Phi0 and remember it
1663 in graph_arr to break recursions.
1664 Else we may not set graph_arr as there a later value is remembered. */
1666 if (!block->attr.block.graph_arr[pos]) {
1667 if (block == get_irg_start_block(current_ir_graph)) {
1668 /* Collapsing to Bad tarvals is no good idea.
1669 So we call a user-supplied routine here that deals with this case as
1670 appropriate for the given language. Unfortunately the only help we can give
1671 here is the position.
1673 Even if all variables are defined before use, it can happen that
1674 we get to the start block, if a cond has been replaced by a tuple
1675 (bad, jmp). In this case we call the function needlessly, eventually
1676 generating a non-existent error.
1677 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
/* Reached the start block without a definition: delegate to the frontend
   hook if set, else fall back to a tarval_bad constant. */
1680 if (default_initialize_local_variable)
1681 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1683 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1684 /* We don't need to care about exception ops in the start block.
1685 There are none by definition. */
1686 return block->attr.block.graph_arr[pos];
/* Normal case: plant a Phi0 placeholder to break recursion cycles. */
1688 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1689 block->attr.block.graph_arr[pos] = phi0;
1690 #if PRECISE_EXC_CONTEXT
1691 if (get_opt_precise_exc_context()) {
1692 /* Set graph_arr for fragile ops. Also here we should break recursion.
1693 We could choose a cyclic path through an cfop. But the recursion would
1694 break at some point. */
1695 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1701 /* This loop goes to all predecessor blocks of the block the Phi node
1702 is in and there finds the operands of the Phi node by calling
1703 get_r_value_internal. */
1704 for (i = 1; i <= ins; ++i) {
1705 prevCfOp = skip_Proj(block->in[i]);
1707 if (is_Bad(prevCfOp)) {
1708 /* In case a Cond has been optimized we would get right to the start block
1709 with an invalid definition. */
1710 nin[i-1] = new_Bad();
1713 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1715 if (!is_Bad(prevBlock)) {
1716 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: read the value valid at its exception edge. */
1717 if (get_opt_precise_exc_context() &&
1718 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1719 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1720 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1723 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1725 nin[i-1] = new_Bad();
1729 /* We want to pass the Phi0 node to the constructor: this finds additional
1730 optimization possibilities.
1731 The Phi0 node either is allocated in this function, or it comes from
1732 a former call to get_r_value_internal. In this case we may not yet
1733 exchange phi0, as this is done in mature_block. */
1735 phi0_all = block->attr.block.graph_arr[pos];
/* Only treat graph_arr[pos] as a Phi0 placeholder if it really is a
   zero-arity Phi belonging to this block. */
1736 if (!((get_irn_op(phi0_all) == op_Phi) &&
1737 (get_irn_arity(phi0_all) == 0) &&
1738 (get_nodes_block(phi0_all) == block)))
1744 /* After collecting all predecessors into the array nin a new Phi node
1745 with these predecessors is created. This constructor contains an
1746 optimization: If all predecessors of the Phi node are identical it
1747 returns the only operand instead of a new Phi node. */
1748 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1750 /* In case we allocated a Phi0 node at the beginning of this procedure,
1751 we need to exchange this Phi0 with the real Phi. */
1753 exchange(phi0, res);
1754 block->attr.block.graph_arr[pos] = res;
1755 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1756 only an optimization. */
1762 /* This function returns the last definition of a variable. In case
1763 this variable was last defined in a previous block, Phi nodes are
1764 inserted. If the part of the firm graph containing the definition
1765 is not yet constructed, a dummy Phi node is returned. */
1767 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1770 /* There are 4 cases to treat.
1772 1. The block is not mature and we visit it the first time. We can not
1773 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1774 predecessors is returned. This node is added to the linked list (field
1775 "link") of the containing block to be completed when this block is
1776 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1779 2. The value is already known in this block, graph_arr[pos] is set and we
1780 visit the block the first time. We can return the value without
1781 creating any new nodes.
1783 3. The block is mature and we visit it the first time. A Phi node needs
1784 to be created (phi_merge). If the Phi is not needed, as all its
1785 operands are the same value reaching the block through different
1786 paths, it's optimized away and the value itself is returned.
1788 4. The block is mature, and we visit it the second time. Now two
1789 subcases are possible:
1790 * The value was computed completely the last time we were here. This
1791 is the case if there is no loop. We can return the proper value.
1792 * The recursion that visited this node and set the flag did not
1793 return yet. We are computing a value in a loop and need to
1794 break the recursion. This case only happens if we visited
1795 the same block with phi_merge before, which inserted a Phi0.
1796 So we return the Phi0.
1799 /* case 4 -- already visited. */
1800 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1801 /* As phi_merge allocates a Phi0 this value is always defined. Here
1802 is the critical difference of the two algorithms. */
1803 assert(block->attr.block.graph_arr[pos]);
1804 return block->attr.block.graph_arr[pos];
1807 /* visited the first time */
1808 set_irn_visited(block, get_irg_visited(current_ir_graph));
1810 /* Get the local valid value */
1811 res = block->attr.block.graph_arr[pos];
1813 /* case 2 -- If the value is actually computed, return it. */
1814 if (res) { return res; };
1816 if (block->attr.block.matured) { /* case 3 */
1818 /* The Phi has the same amount of ins as the corresponding block. */
1819 int ins = get_irn_arity(block);
1821 NEW_ARR_A (ir_node *, nin, ins);
1823 /* Phi merge collects the predecessors and then creates a node. */
1824 res = phi_merge (block, pos, mode, nin, ins);
1826 } else { /* case 1 */
1827 /* The block is not mature, we don't know how many in's are needed. A Phi
1828 with zero predecessors is created. Such a Phi node is called Phi0
1829 node. The Phi0 is then added to the list of Phi0 nodes in this block
1830 to be matured by mature_block later.
1831 The Phi0 has to remember the pos of its internal value. If the real
1832 Phi is computed, pos is used to update the array with the local
1834 res = new_rd_Phi0 (current_ir_graph, block, mode);
1835 res->attr.phi0_pos = pos;
1836 res->link = block->link;
/* Use-before-definition fallback: report and substitute a zero constant. */
1840 /* If we get here, the frontend missed a use-before-definition error */
1843 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1844 assert (mode->code >= irm_F && mode->code <= irm_P);
1845 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1846 get_mode_null(mode));
1849 /* The local valid value is available now. */
1850 block->attr.block.graph_arr[pos] = res;
1855 #endif /* USE_FAST_PHI_CONSTRUCTION */
1857 /* ************************************************************************** */
1859 /** Finalize a Block node, when all control flows are known. */
1860 /** Acceptable parameters are only Block nodes. */
1862 mature_block (ir_node *block)
1869 assert (get_irn_opcode(block) == iro_Block);
1870 /* @@@ should be commented in
1871 assert (!get_Block_matured(block) && "Block already matured"); */
/* Idempotent: a second call on an already matured block is a no-op. */
1873 if (!get_Block_matured(block)) {
/* Arity = number of recorded cf predecessors (in[0] is not a predecessor). */
1874 ins = ARR_LEN (block->in)-1;
1875 /* Fix block parameters */
1876 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1878 /* An array for building the Phi nodes. */
1879 NEW_ARR_A (ir_node *, nin, ins);
1881 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each pending Phi0 (chained via ->link) is upgraded to a real Phi. */
1883 for (n = block->link; n; n=next) {
1884 inc_irg_visited(current_ir_graph);
1886 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1889 block->attr.block.matured = 1;
1891 /* Now, as the block is a finished firm node, we can optimize it.
1892 Since other nodes have been allocated since the block was created
1893 we can not free the node on the obstack. Therefore we have to call
1895 Unfortunately the optimization does not change a lot, as all allocated
1896 nodes refer to the unoptimized node.
1897 We can call _2, as global cse has no effect on blocks. */
1898 block = optimize_in_place_2(block);
1899 irn_vrfy_irg(block, current_ir_graph);
/* new_d_* wrappers: forward to the new_rd_* raw constructors, supplying
   current_ir_graph and (usually) its current block. */
1904 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1906 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Constants are placed in the start block, not the current block. */
1911 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1913 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1918 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1920 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1926 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1928 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1933 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1935 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond: marks the Cond fragmentary and records the
   default projection number before creating the X-mode Proj. */
1940 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1943 assert(arg->op == op_Cond);
1944 arg->attr.c.kind = fragmentary;
1945 arg->attr.c.default_proj = max_proj;
1946 res = new_Proj (arg, mode_X, max_proj);
1951 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1953 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1958 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1960 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1964 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1966 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1971 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1973 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1978 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1980 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1986 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1988 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1993 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1995 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
2000 * allocate the frag array
/* Allocate res's frag array (via *frag_store) iff precise exception context
   is on, we are still building, res was not optimized into another op, and
   no array exists yet (possible via cse). */
2002 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2003 if (get_opt_precise_exc_context()) {
2004 if ((current_ir_graph->phase_state == phase_building) &&
2005 (get_irn_op(res) == op) && /* Could be optimized away. */
2006 !*frag_store) /* Could be a cse where the arr is already set. */ {
2007 *frag_store = new_frag_arr(res);
/* Division-family constructors: build the node in the current block and,
   under PRECISE_EXC_CONTEXT, attach a frag array (these ops may raise). */
2014 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2017 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2019 #if PRECISE_EXC_CONTEXT
2020 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2027 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2030 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2032 #if PRECISE_EXC_CONTEXT
2033 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2040 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2043 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2045 #if PRECISE_EXC_CONTEXT
2046 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2053 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2056 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2058 #if PRECISE_EXC_CONTEXT
2059 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
/* Bitwise / shift / compare / control-flow constructors in the current block. */
2066 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2068 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2073 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2075 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2080 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2082 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2087 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2089 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2094 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2096 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2101 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2103 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2108 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2110 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2115 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2117 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2122 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2124 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2129 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2131 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2136 new_d_Jmp (dbg_info* db)
2138 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2142 new_d_Cond (dbg_info* db, ir_node *c)
2144 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Call is fragile: attach a frag array under PRECISE_EXC_CONTEXT. */
2148 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2152 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2153 store, callee, arity, in, tp);
2154 #if PRECISE_EXC_CONTEXT
2155 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
/* Memory / control constructors; Load, Store and Alloc are fragile and get
   frag arrays under PRECISE_EXC_CONTEXT. */
2162 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2164 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2169 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2171 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2176 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2179 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2181 #if PRECISE_EXC_CONTEXT
2182 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2189 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2192 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2194 #if PRECISE_EXC_CONTEXT
2195 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2202 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2206 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2207 store, size, alloc_type, where);
2208 #if PRECISE_EXC_CONTEXT
/* Note: Alloc keeps its frag array at attr.a.frag_arr, unlike Load/Store. */
2209 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
/* Remaining new_d_* wrappers (Sel family, interprocedural nodes, misc). */
2216 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2218 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2219 store, ptr, size, free_type);
/* simpleSel: a Sel with no array indices (n_index == 0, index == NULL). */
2223 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2224 /* GL: objptr was called frame before. Frame was a bad choice for the name
2225 as the operand could as well be a pointer to a dynamic object. */
2227 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2228 store, objptr, 0, NULL, ent);
2232 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2234 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2235 store, objptr, n_index, index, sel);
2239 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2241 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2242 store, objptr, ent));
/* SymConst lives in the start block, like Const. */
2246 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2248 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2253 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2255 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Presumably the body of new_d_Bad (signature not visible here): returns the
   graph's singleton Bad node -- TODO confirm. */
2263 return current_ir_graph->bad;
2267 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2269 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2274 new_d_Unknown (ir_mode *m)
2276 return new_rd_Unknown(current_ir_graph, m);
2280 new_d_CallBegin (dbg_info *db, ir_node *call)
2283 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2288 new_d_EndReg (dbg_info *db)
2291 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2296 new_d_EndExcept (dbg_info *db)
2299 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2304 new_d_Break (dbg_info *db)
2306 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2310 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2312 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2317 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2321 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2322 callee, arity, in, tp);
2327 /* ********************************************************************* */
2328 /* Comfortable interface with automatic Phi node construction. */
2329 /* (Uses also constructors of ?? interface, except new_Block. */
2330 /* ********************************************************************* */
2332 /* * Block construction **/
2333 /* immature Block without predecessors */
/* Create an immature block (predecessors still unknown; arity -1 yields a
   growable in-array), make it the current block, and set up the per-block
   value array for automatic Phi construction. */
ir_node *new_d_immBlock (dbg_info* db) {
2337 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2338 /* creates a new dynamic in-array as length of in is -1 */
2339 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2340 current_ir_graph->current_block = res;
2341 res->attr.block.matured = 0;
2342 /* res->attr.block.exc = exc_normal; */
2343 /* res->attr.block.handler_entry = 0; */
2344 res->attr.block.irg = current_ir_graph;
/* backedge array is allocated later (mature_block), once arity is known. */
2345 res->attr.block.backedge = NULL;
2346 res->attr.block.in_cg = NULL;
2347 res->attr.block.cg_backedge = NULL;
2348 set_Block_block_visited(res, 0);
2350 /* Create and initialize array for Phi-node construction. */
2351 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2352 current_ir_graph->n_loc);
2353 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2355 /* Immature block may not be optimized! */
2356 irn_vrfy_irg (res, current_ir_graph);
/* new_immBlock: convenience wrapper without debug info. */
2363 return new_d_immBlock(NULL);
2366 /* add an edge to a jmp/control flow node */
/* Appends jmp as a control-flow predecessor of block; only legal while the
   block is still immature. */
2368 add_in_edge (ir_node *block, ir_node *jmp)
2370 if (block->attr.block.matured) {
2371 assert(0 && "Error: Block already matured!\n");
2374 assert (jmp != NULL);
2375 ARR_APP1 (ir_node *, block->in, jmp);
2379 /* changing the current block */
/* Make target the block into which subsequent constructors place nodes. */
2381 switch_block (ir_node *target)
2383 current_ir_graph->current_block = target;
2386 /* ************************ */
2387 /* parameter administration */
2389 /* get a value from the parameter array from the current block by its index */
/* Looks up local variable `pos` in the current block, creating Phi nodes as
   needed. Slot 0 of graph_arr is the memory/store, hence the pos+1 shift. */
2391 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2393 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2394 inc_irg_visited(current_ir_graph);
2396 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2398 /* get a value from the parameter array from the current block by its index */
2400 get_value (int pos, ir_mode *mode)
2402 return get_d_value(NULL, pos, mode);
2405 /* set a value at position pos in the parameter array from the current block */
2407 set_value (int pos, ir_node *value)
2409 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2410 assert(pos+1 < current_ir_graph->n_loc);
2411 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2414 /* get the current store */
/* The memory state is kept at graph_arr slot 0 with mode_M. */
2418 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2419 /* GL: one could call get_value instead */
2420 inc_irg_visited(current_ir_graph);
2421 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2424 /* set the current store */
2426 set_store (ir_node *store)
2428 /* GL: one could call set_value instead */
2429 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2430 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* keep_alive: anchor ka at the End node so optimizations cannot remove it. */
2434 keep_alive (ir_node *ka)
2436 add_End_keepalive(current_ir_graph->end, ka);
2439 /** Useful access routines **/
2440 /* Returns the current block of the current graph. To set the current
2441 block use switch_block(). */
2442 ir_node *get_cur_block() {
2443 return get_irg_current_block(current_ir_graph);
2446 /* Returns the frame type of the current graph */
2447 type *get_cur_frame_type() {
2448 return get_irg_frame_type(current_ir_graph);
2452 /* ********************************************************************* */
2455 /* call once for each run of the library */
2457 init_cons (default_initialize_local_variable_func_t *func)
2459 default_initialize_local_variable = func;
2462 /* call for each graph */
2464 finalize_cons (ir_graph *irg) {
2465 irg->phase_state = phase_high;
2469 ir_node *new_Block(int arity, ir_node **in) {
2470 return new_d_Block(NULL, arity, in);
2472 ir_node *new_Start (void) {
2473 return new_d_Start(NULL);
2475 ir_node *new_End (void) {
2476 return new_d_End(NULL);
2478 ir_node *new_Jmp (void) {
2479 return new_d_Jmp(NULL);
2481 ir_node *new_Cond (ir_node *c) {
2482 return new_d_Cond(NULL, c);
2484 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2485 return new_d_Return(NULL, store, arity, in);
2487 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2488 return new_d_Raise(NULL, store, obj);
2490 ir_node *new_Const (ir_mode *mode, tarval *con) {
2491 return new_d_Const(NULL, mode, con);
2493 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2494 return new_d_SymConst(NULL, value, kind);
2496 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2497 return new_d_simpleSel(NULL, store, objptr, ent);
2499 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2501 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2503 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2504 return new_d_InstOf (NULL, store, objptr, ent);
2506 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2508 return new_d_Call(NULL, store, callee, arity, in, tp);
2510 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2511 return new_d_Add(NULL, op1, op2, mode);
2513 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2514 return new_d_Sub(NULL, op1, op2, mode);
2516 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2517 return new_d_Minus(NULL, op, mode);
2519 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2520 return new_d_Mul(NULL, op1, op2, mode);
2522 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2523 return new_d_Quot(NULL, memop, op1, op2);
2525 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2526 return new_d_DivMod(NULL, memop, op1, op2);
2528 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2529 return new_d_Div(NULL, memop, op1, op2);
2531 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2532 return new_d_Mod(NULL, memop, op1, op2);
2534 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2535 return new_d_Abs(NULL, op, mode);
2537 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2538 return new_d_And(NULL, op1, op2, mode);
2540 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2541 return new_d_Or(NULL, op1, op2, mode);
2543 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2544 return new_d_Eor(NULL, op1, op2, mode);
2546 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2547 return new_d_Not(NULL, op, mode);
2549 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2550 return new_d_Shl(NULL, op, k, mode);
2552 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2553 return new_d_Shr(NULL, op, k, mode);
2555 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2556 return new_d_Shrs(NULL, op, k, mode);
2558 #define new_Rotate new_Rot
2559 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2560 return new_d_Rot(NULL, op, k, mode);
2562 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2563 return new_d_Cmp(NULL, op1, op2);
2565 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2566 return new_d_Conv(NULL, op, mode);
2568 ir_node *new_Cast (ir_node *op, type *to_tp) {
2569 return new_d_Cast(NULL, op, to_tp);
2571 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2572 return new_d_Phi(NULL, arity, in, mode);
2574 ir_node *new_Load (ir_node *store, ir_node *addr) {
2575 return new_d_Load(NULL, store, addr);
2577 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2578 return new_d_Store(NULL, store, addr, val);
2580 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2581 where_alloc where) {
2582 return new_d_Alloc(NULL, store, size, alloc_type, where);
2584 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2586 return new_d_Free(NULL, store, ptr, size, free_type);
2588 ir_node *new_Sync (int arity, ir_node **in) {
2589 return new_d_Sync(NULL, arity, in);
2591 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2592 return new_d_Proj(NULL, arg, mode, proj);
2594 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2595 return new_d_defaultProj(NULL, arg, max_proj);
2597 ir_node *new_Tuple (int arity, ir_node **in) {
2598 return new_d_Tuple(NULL, arity, in);
2600 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2601 return new_d_Id(NULL, val, mode);
2603 ir_node *new_Bad (void) {
2606 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2607 return new_d_Confirm (NULL, val, bound, cmp);
2609 ir_node *new_Unknown(ir_mode *m) {
2610 return new_d_Unknown(m);
2612 ir_node *new_CallBegin (ir_node *callee) {
2613 return new_d_CallBegin(NULL, callee);
2615 ir_node *new_EndReg (void) {
2616 return new_d_EndReg(NULL);
2618 ir_node *new_EndExcept (void) {
2619 return new_d_EndExcept(NULL);
2621 ir_node *new_Break (void) {
2622 return new_d_Break(NULL);
2624 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2625 return new_d_Filter(NULL, arg, mode, proj);
2627 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2628 return new_d_FuncCall(NULL, callee, arity, in, tp);