3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language-dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 /* res->attr.block.exc = exc_normal; */
64 /* res->attr.block.handler_entry = 0; */
65 res->attr.block.irg = irg;
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 /* res->attr.start.irg = irg; */
82 irn_vrfy_irg (res, irg);
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 /* Don't assert that block matured: the use of this constructor is strongly
108 if ( get_Block_matured(block) )
109 assert( get_irn_arity(block) == arity );
111 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
113 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
115 for (i = arity-1; i >= 0; i--)
116 if (get_irn_op(in[i]) == op_Unknown) {
121 if (!has_unknown) res = optimize_node (res);
122 irn_vrfy_irg (res, irg);
124 /* Memory Phis in endless loops must be kept alive.
125 As we can't distinguish these easily we keep all of them alive. */
126 if ((res->op == op_Phi) && (mode == mode_M))
127 add_End_keepalive(irg->end, res);
132 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
135 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
136 res->attr.con.tv = con;
137 set_Const_type(res, tp); /* Call method because of complex assertion. */
138 res = optimize_node (res);
139 assert(get_Const_type(res) == tp);
140 irn_vrfy_irg (res, irg);
143 res = local_optimize_newby (res);
150 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
152 type *tp = unknown_type;
153 if (tarval_is_entity(con))
154 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
155 return new_rd_Const_type (db, irg, block, mode, con, tp);
159 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
164 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
165 res = optimize_node (res);
166 irn_vrfy_irg (res, irg);
171 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
177 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
178 res->attr.proj = proj;
181 assert(get_Proj_pred(res));
182 assert(get_nodes_Block(get_Proj_pred(res)));
184 res = optimize_node (res);
186 irn_vrfy_irg (res, irg);
192 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
196 assert(arg->op == op_Cond);
197 arg->attr.c.kind = fragmentary;
198 arg->attr.c.default_proj = max_proj;
199 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
204 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
210 res = optimize_node (res);
211 irn_vrfy_irg (res, irg);
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
219 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
220 res->attr.cast.totype = to_tp;
221 res = optimize_node (res);
222 irn_vrfy_irg (res, irg);
227 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
231 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
232 res = optimize_node (res);
233 irn_vrfy_irg (res, irg);
238 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
239 ir_node *op1, ir_node *op2, ir_mode *mode)
245 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
246 res = optimize_node (res);
247 irn_vrfy_irg (res, irg);
252 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
253 ir_node *op1, ir_node *op2, ir_mode *mode)
259 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
260 res = optimize_node (res);
261 irn_vrfy_irg (res, irg);
266 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
267 ir_node *op, ir_mode *mode)
272 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
273 res = optimize_node (res);
274 irn_vrfy_irg (res, irg);
279 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
280 ir_node *op1, ir_node *op2, ir_mode *mode)
286 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
287 res = optimize_node (res);
288 irn_vrfy_irg (res, irg);
293 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
294 ir_node *memop, ir_node *op1, ir_node *op2)
301 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
302 res = optimize_node (res);
303 irn_vrfy_irg (res, irg);
308 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
309 ir_node *memop, ir_node *op1, ir_node *op2)
316 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
317 res = optimize_node (res);
318 irn_vrfy_irg (res, irg);
323 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
324 ir_node *memop, ir_node *op1, ir_node *op2)
331 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
332 res = optimize_node (res);
333 irn_vrfy_irg (res, irg);
338 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
339 ir_node *memop, ir_node *op1, ir_node *op2)
346 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
347 res = optimize_node (res);
348 irn_vrfy_irg (res, irg);
353 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
354 ir_node *op1, ir_node *op2, ir_mode *mode)
360 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
361 res = optimize_node (res);
362 irn_vrfy_irg (res, irg);
367 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
368 ir_node *op1, ir_node *op2, ir_mode *mode)
374 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
375 res = optimize_node (res);
376 irn_vrfy_irg (res, irg);
381 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
382 ir_node *op1, ir_node *op2, ir_mode *mode)
388 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
389 res = optimize_node (res);
390 irn_vrfy_irg (res, irg);
395 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
396 ir_node *op, ir_mode *mode)
401 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
402 res = optimize_node (res);
403 irn_vrfy_irg (res, irg);
408 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
409 ir_node *op, ir_node *k, ir_mode *mode)
415 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
416 res = optimize_node (res);
417 irn_vrfy_irg (res, irg);
422 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
423 ir_node *op, ir_node *k, ir_mode *mode)
429 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
430 res = optimize_node (res);
431 irn_vrfy_irg (res, irg);
436 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
437 ir_node *op, ir_node *k, ir_mode *mode)
443 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
444 res = optimize_node (res);
445 irn_vrfy_irg (res, irg);
450 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
451 ir_node *op, ir_node *k, ir_mode *mode)
457 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
458 res = optimize_node (res);
459 irn_vrfy_irg (res, irg);
464 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *op, ir_mode *mode)
470 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
471 res = optimize_node (res);
472 irn_vrfy_irg (res, irg);
477 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
478 ir_node *op1, ir_node *op2)
484 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
485 res = optimize_node (res);
486 irn_vrfy_irg (res, irg);
491 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
494 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
495 res = optimize_node (res);
496 irn_vrfy_irg (res, irg);
501 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
506 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
507 res->attr.c.kind = dense;
508 res->attr.c.default_proj = 0;
509 res = optimize_node (res);
510 irn_vrfy_irg (res, irg);
515 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
516 ir_node *callee, int arity, ir_node **in, type *tp)
523 NEW_ARR_A (ir_node *, r_in, r_arity);
526 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
528 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
530 assert(is_method_type(tp));
531 set_Call_type(res, tp);
532 res->attr.call.callee_arr = NULL;
533 res = optimize_node (res);
534 irn_vrfy_irg (res, irg);
539 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
540 ir_node *store, int arity, ir_node **in)
547 NEW_ARR_A (ir_node *, r_in, r_arity);
549 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
550 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
551 res = optimize_node (res);
552 irn_vrfy_irg (res, irg);
557 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
563 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
564 res = optimize_node (res);
565 irn_vrfy_irg (res, irg);
570 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
571 ir_node *store, ir_node *adr)
577 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
579 res = optimize_node (res);
580 irn_vrfy_irg (res, irg);
585 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
586 ir_node *store, ir_node *adr, ir_node *val)
593 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
595 res = optimize_node (res);
597 irn_vrfy_irg (res, irg);
602 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
603 ir_node *size, type *alloc_type, where_alloc where)
609 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
611 res->attr.a.where = where;
612 res->attr.a.type = alloc_type;
614 res = optimize_node (res);
615 irn_vrfy_irg (res, irg);
620 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
621 ir_node *ptr, ir_node *size, type *free_type)
628 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
630 res->attr.f = free_type;
632 res = optimize_node (res);
633 irn_vrfy_irg (res, irg);
638 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
639 int arity, ir_node **in, entity *ent)
645 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
648 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
651 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
652 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
654 res->attr.s.ent = ent;
656 res = optimize_node (res);
657 irn_vrfy_irg (res, irg);
662 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
663 ir_node *objptr, type *ent)
670 NEW_ARR_A (ir_node *, r_in, r_arity);
674 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
676 res->attr.io.ent = ent;
678 /* res = optimize (res);
679 * irn_vrfy_irg (res, irg); */
684 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
685 symconst_kind symkind)
689 if (symkind == linkage_ptr_info)
693 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
695 res->attr.i.num = symkind;
696 if (symkind == linkage_ptr_info) {
697 res->attr.i.tori.ptrinfo = (ident *)value;
699 assert ( ( (symkind == type_tag)
700 || (symkind == size))
701 && (is_type(value)));
702 res->attr.i.tori.typ = (type *)value;
704 res = optimize_node (res);
705 irn_vrfy_irg (res, irg);
710 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
714 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
716 res = optimize_node (res);
717 irn_vrfy_irg (res, irg);
722 new_rd_Bad (ir_graph *irg)
728 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
730 ir_node *in[2], *res;
734 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
736 res->attr.confirm_cmp = cmp;
738 res = optimize_node (res);
739 irn_vrfy_irg(res, irg);
744 new_rd_Unknown (ir_graph *irg, ir_mode *m)
746 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
750 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
754 in[0] = get_Call_ptr(call);
755 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
756 /* res->attr.callbegin.irg = irg; */
757 res->attr.callbegin.call = call;
758 res = optimize_node (res);
759 irn_vrfy_irg (res, irg);
764 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
768 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
771 irn_vrfy_irg (res, irg);
776 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
780 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
781 irg->end_except = res;
783 irn_vrfy_irg (res, irg);
788 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
791 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
792 res = optimize_node (res);
793 irn_vrfy_irg (res, irg);
798 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
804 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
805 res->attr.filter.proj = proj;
806 res->attr.filter.in_cg = NULL;
807 res->attr.filter.backedge = NULL;
810 assert(get_Proj_pred(res));
811 assert(get_nodes_Block(get_Proj_pred(res)));
813 res = optimize_node (res);
815 irn_vrfy_irg (res, irg);
821 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
822 ir_node *callee, int arity, ir_node **in, type *tp)
829 NEW_ARR_A (ir_node *, r_in, r_arity);
831 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
833 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
835 assert(is_method_type(tp));
836 set_FuncCall_type(res, tp);
837 res->attr.call.callee_arr = NULL;
838 res = optimize_node (res);
839 irn_vrfy_irg (res, irg);
844 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
845 return new_rd_Block(NULL, irg, arity, in);
847 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
848 return new_rd_Start(NULL, irg, block);
850 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
851 return new_rd_End(NULL, irg, block);
853 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
854 return new_rd_Jmp(NULL, irg, block);
856 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
857 return new_rd_Cond(NULL, irg, block, c);
859 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
860 ir_node *store, int arity, ir_node **in) {
861 return new_rd_Return(NULL, irg, block, store, arity, in);
863 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
864 ir_node *store, ir_node *obj) {
865 return new_rd_Raise(NULL, irg, block, store, obj);
867 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
868 ir_mode *mode, tarval *con) {
869 return new_rd_Const(NULL, irg, block, mode, con);
871 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
872 type_or_id_p value, symconst_kind symkind) {
873 return new_rd_SymConst(NULL, irg, block, value, symkind);
875 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
876 ir_node *objptr, int n_index, ir_node **index,
878 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
880 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
882 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
884 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
885 ir_node *callee, int arity, ir_node **in,
887 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
889 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
890 ir_node *op1, ir_node *op2, ir_mode *mode) {
891 return new_rd_Add(NULL, irg, block, op1, op2, mode);
893 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
894 ir_node *op1, ir_node *op2, ir_mode *mode) {
895 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
897 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
898 ir_node *op, ir_mode *mode) {
899 return new_rd_Minus(NULL, irg, block, op, mode);
901 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
902 ir_node *op1, ir_node *op2, ir_mode *mode) {
903 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
905 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
906 ir_node *memop, ir_node *op1, ir_node *op2) {
907 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
909 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
910 ir_node *memop, ir_node *op1, ir_node *op2) {
911 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
913 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
914 ir_node *memop, ir_node *op1, ir_node *op2) {
915 return new_rd_Div(NULL, irg, block, memop, op1, op2);
917 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
918 ir_node *memop, ir_node *op1, ir_node *op2) {
919 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
921 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
922 ir_node *op, ir_mode *mode) {
923 return new_rd_Abs(NULL, irg, block, op, mode);
925 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
926 ir_node *op1, ir_node *op2, ir_mode *mode) {
927 return new_rd_And(NULL, irg, block, op1, op2, mode);
929 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
930 ir_node *op1, ir_node *op2, ir_mode *mode) {
931 return new_rd_Or(NULL, irg, block, op1, op2, mode);
933 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
934 ir_node *op1, ir_node *op2, ir_mode *mode) {
935 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
937 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
938 ir_node *op, ir_mode *mode) {
939 return new_rd_Not(NULL, irg, block, op, mode);
941 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
942 ir_node *op1, ir_node *op2) {
943 return new_rd_Cmp(NULL, irg, block, op1, op2);
945 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
946 ir_node *op, ir_node *k, ir_mode *mode) {
947 return new_rd_Shl(NULL, irg, block, op, k, mode);
949 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
950 ir_node *op, ir_node *k, ir_mode *mode) {
951 return new_rd_Shr(NULL, irg, block, op, k, mode);
953 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
954 ir_node *op, ir_node *k, ir_mode *mode) {
955 return new_rd_Shrs(NULL, irg, block, op, k, mode);
957 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
958 ir_node *op, ir_node *k, ir_mode *mode) {
959 return new_rd_Rot(NULL, irg, block, op, k, mode);
961 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
962 ir_node *op, ir_mode *mode) {
963 return new_rd_Conv(NULL, irg, block, op, mode);
965 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
966 return new_rd_Cast(NULL, irg, block, op, to_tp);
968 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
969 ir_node **in, ir_mode *mode) {
970 return new_rd_Phi(NULL, irg, block, arity, in, mode);
972 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
973 ir_node *store, ir_node *adr) {
974 return new_rd_Load(NULL, irg, block, store, adr);
976 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
977 ir_node *store, ir_node *adr, ir_node *val) {
978 return new_rd_Store(NULL, irg, block, store, adr, val);
980 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
981 ir_node *size, type *alloc_type, where_alloc where) {
982 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
984 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
985 ir_node *ptr, ir_node *size, type *free_type) {
986 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
988 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
989 return new_rd_Sync(NULL, irg, block, arity, in);
991 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
992 ir_mode *mode, long proj) {
993 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
995 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
997 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
999 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1000 int arity, ir_node **in) {
1001 return new_rd_Tuple(NULL, irg, block, arity, in );
1003 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1004 ir_node *val, ir_mode *mode) {
1005 return new_rd_Id(NULL, irg, block, val, mode);
1007 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1008 return new_rd_Bad(irg);
1010 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1011 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1013 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1014 return new_rd_Unknown(irg, m);
1016 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1017 return new_rd_CallBegin(NULL, irg, block, callee);
1019 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1020 return new_rd_EndReg(NULL, irg, block);
1022 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1023 return new_rd_EndExcept(NULL, irg, block);
1025 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1026 return new_rd_Break(NULL, irg, block);
1028 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1029 ir_mode *mode, long proj) {
1030 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1032 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1033 ir_node *callee, int arity, ir_node **in,
1035 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1039 /** ********************/
1040 /** public interfaces */
1041 /** construction tools */
1045 * - create a new Start node in the current block
1047 * @return s - pointer to the created Start node
1052 new_d_Start (dbg_info* db)
1056 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1057 op_Start, mode_T, 0, NULL);
1058 /* res->attr.start.irg = current_ir_graph; */
1060 res = optimize_node (res);
1061 irn_vrfy_irg (res, current_ir_graph);
1066 new_d_End (dbg_info* db)
1069 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1070 op_End, mode_X, -1, NULL);
1071 res = optimize_node (res);
1072 irn_vrfy_irg (res, current_ir_graph);
1077 /* Constructs a Block with a fixed number of predecessors.
1078 Does set current_block. Can be used with automatic Phi
1079 node construction. */
1081 new_d_Block (dbg_info* db, int arity, ir_node **in)
1085 bool has_unknown = false;
1087 res = new_rd_Block (db, current_ir_graph, arity, in);
1089 /* Create and initialize array for Phi-node construction. */
1090 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1091 current_ir_graph->n_loc);
1092 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1094 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1096 if (!has_unknown) res = optimize_node (res);
1097 current_ir_graph->current_block = res;
1099 irn_vrfy_irg (res, current_ir_graph);
1104 /* ***********************************************************************/
1105 /* Methods necessary for automatic Phi node creation */
1107 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1108 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1109 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1110 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1112 Call Graph: ( A ---> B == A "calls" B)
1114 get_value mature_block
1122 get_r_value_internal |
1126 new_rd_Phi0 new_rd_Phi_in
1128 * *************************************************************************** */
1130 /** Creates a Phi node with 0 predecessors */
1131 static INLINE ir_node *
1132 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1135 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1136 irn_vrfy_irg (res, irg);
1140 /* There are two implementations of the Phi node construction. The first
1141 is faster, but does not work for blocks with more than 2 predecessors.
1142 The second works always but is slower and causes more unnecessary Phi
1144 Select the implementations by the following preprocessor flag set in
1146 #if USE_FAST_PHI_CONSTRUCTION
1148 /* This is a stack used for allocating and deallocating nodes in
1149 new_rd_Phi_in. The original implementation used the obstack
1150 to model this stack, now it is explicit. This reduces side effects.
1152 #if USE_EXPLICIT_PHI_IN_STACK
1153 INLINE Phi_in_stack *
1154 new_Phi_in_stack(void) {
1157 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1159 res->stack = NEW_ARR_F (ir_node *, 1);
1166 free_Phi_in_stack(Phi_in_stack *s) {
1167 DEL_ARR_F(s->stack);
1171 free_to_Phi_in_stack(ir_node *phi) {
1172 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1173 current_ir_graph->Phi_in_stack->pos)
1174 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1176 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1178 (current_ir_graph->Phi_in_stack->pos)++;
1181 static INLINE ir_node *
1182 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1183 int arity, ir_node **in) {
1185 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1186 int pos = current_ir_graph->Phi_in_stack->pos;
1190 /* We need to allocate a new node */
1191 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1192 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1194 /* reuse the old node and initialize it again. */
1197 assert (res->kind == k_ir_node);
1198 assert (res->op == op_Phi);
1202 assert (arity >= 0);
1203 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1204 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1206 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1208 (current_ir_graph->Phi_in_stack->pos)--;
1212 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1214 /* Creates a Phi node with a given, fixed array **in of predecessors.
1215 If the Phi node is unnecessary, as the same value reaches the block
1216 through all control flow paths, it is eliminated and the value
1217 returned directly. This constructor is only intended for use in
1218 the automatic Phi node generation triggered by get_value or mature.
1219 The implementation is quite tricky and depends on the fact, that
1220 the nodes are allocated on a stack:
1221 The in array contains predecessors and NULLs. The NULLs appear,
1222 if get_r_value_internal, that computed the predecessors, reached
1223 the same block on two paths. In this case the same value reaches
1224 this block on both paths, there is no definition in between. We need
1225 not allocate a Phi where these paths merge, but we have to communicate
1226 this fact to the caller. This happens by returning a pointer to the
1227 node the caller _will_ allocate. (Yes, we predict the address. We can
1228 do so because the nodes are allocated on the obstack.) The caller then
1229 finds a pointer to itself and, when this routine is called again,
1232 static INLINE ir_node *
1233 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1236 ir_node *res, *known;
1238 /* Allocate a new node on the obstack. This can return a node to
1239 which some of the pointers in the in-array already point.
1240 Attention: the constructor copies the in array, i.e., the later
1241 changes to the array in this routine do not affect the
1242 constructed node! If the in array contains NULLs, there will be
1243 missing predecessors in the returned node. Is this a possible
1244 internal state of the Phi node generation? */
1245 #if USE_EXPLICIT_PHI_IN_STACK
1246 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1248 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1249 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1252 /* The in-array can contain NULLs. These were returned by
1253 get_r_value_internal if it reached the same block/definition on a
1254 second path. The NULLs are replaced by the node itself to
1255 simplify the test in the next loop. */
1256 for (i = 0; i < ins; ++i) {
1261 /* This loop checks whether the Phi has more than one predecessor.
1262 If so, it is a real Phi node and we break the loop. Else the Phi
1263 node merges the same definition on several paths and therefore is
1265 for (i = 0; i < ins; ++i)
1267 if (in[i] == res || in[i] == known) continue;
1275 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1277 #if USE_EXPLICIT_PHI_IN_STACK
1278 free_to_Phi_in_stack(res);
1280 obstack_free (current_ir_graph->obst, res);
1284 res = optimize_node (res);
1285 irn_vrfy_irg (res, irg);
1288 /* return the pointer to the Phi node. This node might be deallocated! */
1293 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1296 allocates and returns this node. The routine called to allocate the
1297 node might optimize it away and return a real value, or even a pointer
1298 to a deallocated Phi node on top of the obstack!
1299 This function is called with an in-array of proper size. **/
1301 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1303 ir_node *prevBlock, *res;
1306 /* This loop goes to all predecessor blocks of the block the Phi node is in
1307 and there finds the operands of the Phi node by calling
1308 get_r_value_internal. */
1309 for (i = 1; i <= ins; ++i) {
1310 assert (block->in[i]);
/* in[0] of the control flow predecessor is the block it belongs to. */
1311 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1313 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1316 /* After collecting all predecessors into the array nin a new Phi node
1317 with these predecessors is created. This constructor contains an
1318 optimization: If all predecessors of the Phi node are identical it
1319 returns the only operand instead of a new Phi node. If the value
1320 passes two different control flow edges without being defined, and
1321 this is the second path treated, a pointer to the node that will be
1322 allocated for the first path (recursion) is returned. We already
1323 know the address of this node, as it is the next node to be allocated
1324 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1325 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1327 /* Now we know the value for "pos" and can enter it in the array with
1328 all known local variables. Attention: this might be a pointer to
1329 a node, that later will be allocated!!! See new_rd_Phi_in.
1330 If this is called in mature, after some set_value in the same block,
1331 the proper value must not be overwritten:
1333 get_value (makes Phi0, puts it into graph_arr)
1334 set_value (overwrites Phi0 in graph_arr)
1335 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record res if no later set_value already stored a value at pos. */
1338 if (!block->attr.block.graph_arr[pos]) {
1339 block->attr.block.graph_arr[pos] = res;
1341 /* printf(" value already computed by %s\n",
1342 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1348 /* This function returns the last definition of a variable. In case
1349 this variable was last defined in a previous block, Phi nodes are
1350 inserted. If the part of the firm graph containing the definition
1351 is not yet constructed, a dummy Phi node is returned. */
1353 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1356 /* There are 4 cases to treat.
1358 1. The block is not mature and we visit it the first time. We can not
1359 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1360 predecessors is returned. This node is added to the linked list (field
1361 "link") of the containing block to be completed when this block is
1362 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1365 2. The value is already known in this block, graph_arr[pos] is set and we
1366 visit the block the first time. We can return the value without
1367 creating any new nodes.
1369 3. The block is mature and we visit it the first time. A Phi node needs
1370 to be created (phi_merge). If the Phi is not needed, as all it's
1371 operands are the same value reaching the block through different
1372 paths, it's optimized away and the value itself is returned.
1374 4. The block is mature, and we visit it the second time. Now two
1375 subcases are possible:
1376 * The value was computed completely the last time we were here. This
1377 is the case if there is no loop. We can return the proper value.
1378 * The recursion that visited this node and set the flag did not
1379 return yet. We are computing a value in a loop and need to
1380 break the recursion without knowing the result yet.
1381 @@@ strange case. Straight forward we would create a Phi before
1382 starting the computation of it's predecessors. In this case we will
1383 find a Phi here in any case. The problem is that this implementation
1384 only creates a Phi after computing the predecessors, so that it is
1385 hard to compute self references of this Phi. @@@
1386 There is no simple check for the second subcase. Therefore we check
1387 for a second visit and treat all such cases as the second subcase.
1388 Anyways, the basic situation is the same: we reached a block
1389 on two paths without finding a definition of the value: No Phi
1390 nodes are needed on both paths.
1391 We return this information "Two paths, no Phi needed" by a very tricky
1392 implementation that relies on the fact that an obstack is a stack and
1393 will return a node with the same address on different allocations.
1394 Look also at phi_merge and new_rd_phi_in to understand this.
1395 @@@ Unfortunately this does not work, see testprogram
1396 three_cfpred_example.
1400 /* case 4 -- already visited. */
/* NOTE(review): returning NULL relies on the callers (phi_merge /
   new_rd_Phi_in) handling it via the obstack trick described above. */
1401 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1403 /* visited the first time */
1404 set_irn_visited(block, get_irg_visited(current_ir_graph));
1406 /* Get the local valid value */
1407 res = block->attr.block.graph_arr[pos];
1409 /* case 2 -- If the value is actually computed, return it. */
1410 if (res) return res;
1412 if (block->attr.block.matured) { /* case 3 */
1414 /* The Phi has the same amount of ins as the corresponding block. */
1415 int ins = get_irn_arity(block);
1417 NEW_ARR_A (ir_node *, nin, ins);
1419 /* Phi merge collects the predecessors and then creates a node. */
1420 res = phi_merge (block, pos, mode, nin, ins);
1422 } else { /* case 1 */
1423 /* The block is not mature, we don't know how many in's are needed. A Phi
1424 with zero predecessors is created. Such a Phi node is called Phi0
1425 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1426 to the list of Phi0 nodes in this block to be matured by mature_block
1428 The Phi0 has to remember the pos of it's internal value. If the real
1429 Phi is computed, pos is used to update the array with the local
1432 res = new_rd_Phi0 (current_ir_graph, block, mode);
1433 res->attr.phi0_pos = pos;
/* Prepend the Phi0 to the block's list of pending Phi0s (field "link"). */
1434 res->link = block->link;
1438 /* If we get here, the frontend missed a use-before-definition error */
1441 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1442 assert (mode->code >= irm_F && mode->code <= irm_P);
1443 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1444 tarval_mode_null[mode->code]);
1447 /* The local valid value is available now. */
1448 block->attr.block.graph_arr[pos] = res;
1456 it starts the recursion. This causes an Id at the entry of
1457 every block that has no definition of the value! **/
1459 #if USE_EXPLICIT_PHI_IN_STACK
1461 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1462 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1465 static INLINE ir_node *
1466 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1467 ir_node **in, int ins, ir_node *phi0)
1470 ir_node *res, *known;
1472 /* Allocate a new node on the obstack. The allocation copies the in
1474 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1475 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1477 /* This loop checks whether the Phi has more than one predecessor.
1478 If so, it is a real Phi node and we break the loop. Else the
1479 Phi node merges the same definition on several paths and therefore
1480 is not needed. Don't consider Bad nodes! */
1482 for (i=0; i < ins; ++i)
1486 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1488 /* Optimize self referencing Phis: We can't detect them yet properly, as
1489 they still refer to the Phi0 they will replace. So replace right now. */
1490 if (phi0 && in[i] == phi0) in[i] = res;
1492 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1500 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the unneeded Phi from the top of the obstack. */
1503 obstack_free (current_ir_graph->obst, res);
1504 if (is_Phi(known)) {
1505 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1506 order, an enclosing Phi node may become superfluous. */
1507 res = optimize_in_place_2(known);
1508 if (res != known) { exchange(known, res); }
1513 /* An undefined value, e.g., in unreachable code. */
1517 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1518 irn_vrfy_irg (res, irg);
1519 /* Memory Phis in endless loops must be kept alive.
1520 As we can't distinguish these easily we keep all of them alive. */
1521 if ((res->op == op_Phi) && (mode == mode_M))
1522 add_End_keepalive(irg->end, res);
1529 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1531 #if PRECISE_EXC_CONTEXT
1533 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1535 /* Construct a new frag_array for node n.
1536 Copy the content from the current graph_arr of the corresponding block:
1537 this is the current state.
1538 Set ProjM(n) as current memory state.
1539 Further the last entry in frag_arr of current block points to n. This
1540 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1542 static INLINE ir_node ** new_frag_arr (ir_node *n)
1547 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1548 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1549 sizeof(ir_node *)*current_ir_graph->n_loc);
1551 /* turn off optimization before allocating Proj nodes, as res isn't
1553 opt = get_opt_optimize(); set_optimize(0);
/* NOTE(review): optimization is disabled here; confirm set_optimize(opt)
   restores it before this function returns. */
1554 /* Here we rely on the fact that all frag ops have Memory as first result! */
1555 if (get_irn_op(n) == op_Call)
1556 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1558 assert((pn_Quot_M == pn_DivMod_M) &&
1559 (pn_Quot_M == pn_Div_M) &&
1560 (pn_Quot_M == pn_Mod_M) &&
1561 (pn_Quot_M == pn_Load_M) &&
1562 (pn_Quot_M == pn_Store_M) &&
1563 (pn_Quot_M == pn_Alloc_M) );
1564 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Slot n_loc-1 links the block's chain of fragile ops: newest op first. */
1568 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the frag_arr of fragile op n; its location in the node's
   attributes depends on the opcode. */
1572 static INLINE ir_node **
1573 get_frag_arr (ir_node *n) {
1574 if (get_irn_op(n) == op_Call) {
1575 return n->attr.call.frag_arr;
1576 } else if (get_irn_op(n) == op_Alloc) {
1577 return n->attr.a.frag_arr;
1579 return n->attr.frag_arr;
/* Record val at position pos in frag_arr and in all later frag arrays of the
   block's fragile-op chain, without overwriting existing entries.
   NOTE(review): two variants are visible here — a recursive walk and a
   bounded (1000-step) iterative loop; presumably one is conditionally
   compiled. Confirm against the full source. */
1584 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1586 if (!frag_arr[pos]) frag_arr[pos] = val;
/* Slot n_loc-1 holds the next fragile op in the chain (see new_frag_arr). */
1587 if (frag_arr[current_ir_graph->n_loc - 1]) {
1588 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1589 assert(arr != frag_arr && "Endless recursion detected");
1590 set_frag_value(arr, pos, val);
1595 for (i = 0; i < 1000; ++i) {
1596 if (!frag_arr[pos]) {
1597 frag_arr[pos] = val;
1599 if (frag_arr[current_ir_graph->n_loc - 1]) {
1600 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1606 assert(0 && "potential endless recursion");
/* Return the value of variable pos as seen directly after fragile op cfOp,
   consulting cfOp's frag_arr and building Phi nodes if a later set_value
   exists in block. */
1611 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1615 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1617 frag_arr = get_frag_arr(cfOp);
1618 res = frag_arr[pos];
1620 if (block->attr.block.graph_arr[pos]) {
1621 /* There was a set_value after the cfOp and no get_value before that
1622 set_value. We must build a Phi node now. */
1623 if (block->attr.block.matured) {
1624 int ins = get_irn_arity(block);
1626 NEW_ARR_A (ir_node *, nin, ins);
1627 res = phi_merge(block, pos, mode, nin, ins);
/* Block not matured yet: fall back to a Phi0 queued on block->link. */
1629 res = new_rd_Phi0 (current_ir_graph, block, mode);
1630 res->attr.phi0_pos = pos;
1631 res->link = block->link;
1635 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1636 but this should be better: (remove comment if this works) */
1637 /* It's a Phi, we can write this into all graph_arrs with NULL */
1638 set_frag_value(block->attr.block.graph_arr, pos, res);
1640 res = get_r_value_internal(block, pos, mode);
1641 set_frag_value(block->attr.block.graph_arr, pos, res);
1649 computes the predecessors for the real phi node, and then
1650 allocates and returns this node. The routine called to allocate the
1651 node might optimize it away and return a real value.
1652 This function must be called with an in-array of proper size. **/
1654 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1656 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1659 /* If this block has no value at pos create a Phi0 and remember it
1660 in graph_arr to break recursions.
1661 Else we may not set graph_arr as a later value is remembered there. */
1663 if (!block->attr.block.graph_arr[pos]) {
1664 if (block == get_irg_start_block(current_ir_graph)) {
1665 /* Collapsing to Bad tarvals is no good idea.
1666 So we call a user-supplied routine here that deals with this case as
1667 appropriate for the given language. Unfortunately the only help we can give
1668 here is the position.
1670 Even if all variables are defined before use, it can happen that
1671 we get to the start block, if a cond has been replaced by a tuple
1672 (bad, jmp). In this case we call the function needlessly, eventually
1673 generating a non-existent error.
1674 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1677 if (default_initialize_local_variable)
1678 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1680 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1681 /* We don't need to care about exception ops in the start block.
1682 There are none by definition. */
1683 return block->attr.block.graph_arr[pos];
/* Break recursion: remember a Phi0 for pos before visiting predecessors. */
1685 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1686 block->attr.block.graph_arr[pos] = phi0;
1687 #if PRECISE_EXC_CONTEXT
1688 if (get_opt_precise_exc_context()) {
1689 /* Set graph_arr for fragile ops. Also here we should break recursion.
1690 We could choose a cyclic path through a cfop. But the recursion would
1691 break at some point. */
1692 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1698 /* This loop goes to all predecessor blocks of the block the Phi node
1699 is in and there finds the operands of the Phi node by calling
1700 get_r_value_internal. */
1701 for (i = 1; i <= ins; ++i) {
1702 prevCfOp = skip_Proj(block->in[i]);
1704 if (is_Bad(prevCfOp)) {
1705 /* In case a Cond has been optimized we would get right to the start block
1706 with an invalid definition. */
1707 nin[i-1] = new_Bad();
1710 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1712 if (!is_Bad(prevBlock)) {
1713 #if PRECISE_EXC_CONTEXT
1714 if (get_opt_precise_exc_context() &&
1715 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1716 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1717 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1720 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1722 nin[i-1] = new_Bad();
1726 /* We want to pass the Phi0 node to the constructor: this finds additional
1727 optimization possibilities.
1728 The Phi0 node either is allocated in this function, or it comes from
1729 a former call to get_r_value_internal. In this case we may not yet
1730 exchange phi0, as this is done in mature_block. */
1732 phi0_all = block->attr.block.graph_arr[pos];
/* Only pass a genuine Phi0 of this block on to the constructor. */
1733 if (!((get_irn_op(phi0_all) == op_Phi) &&
1734 (get_irn_arity(phi0_all) == 0) &&
1735 (get_nodes_block(phi0_all) == block)))
1741 /* After collecting all predecessors into the array nin a new Phi node
1742 with these predecessors is created. This constructor contains an
1743 optimization: If all predecessors of the Phi node are identical it
1744 returns the only operand instead of a new Phi node. */
1745 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1747 /* In case we allocated a Phi0 node at the beginning of this procedure,
1748 we need to exchange this Phi0 with the real Phi. */
1750 exchange(phi0, res);
1751 block->attr.block.graph_arr[pos] = res;
1752 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1753 only an optimization. */
1759 /* This function returns the last definition of a variable. In case
1760 this variable was last defined in a previous block, Phi nodes are
1761 inserted. If the part of the firm graph containing the definition
1762 is not yet constructed, a dummy Phi node is returned. */
1764 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1767 /* There are 4 cases to treat.
1769 1. The block is not mature and we visit it the first time. We can not
1770 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1771 predecessors is returned. This node is added to the linked list (field
1772 "link") of the containing block to be completed when this block is
1773 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1776 2. The value is already known in this block, graph_arr[pos] is set and we
1777 visit the block the first time. We can return the value without
1778 creating any new nodes.
1780 3. The block is mature and we visit it the first time. A Phi node needs
1781 to be created (phi_merge). If the Phi is not needed, as all it's
1782 operands are the same value reaching the block through different
1783 paths, it's optimized away and the value itself is returned.
1785 4. The block is mature, and we visit it the second time. Now two
1786 subcases are possible:
1787 * The value was computed completely the last time we were here. This
1788 is the case if there is no loop. We can return the proper value.
1789 * The recursion that visited this node and set the flag did not
1790 return yet. We are computing a value in a loop and need to
1791 break the recursion. This case only happens if we visited
1792 the same block with phi_merge before, which inserted a Phi0.
1793 So we return the Phi0.
1796 /* case 4 -- already visited. */
1797 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1798 /* As phi_merge allocates a Phi0 this value is always defined. Here
1799 is the critical difference of the two algorithms. */
1800 assert(block->attr.block.graph_arr[pos]);
1801 return block->attr.block.graph_arr[pos];
1804 /* visited the first time */
1805 set_irn_visited(block, get_irg_visited(current_ir_graph));
1807 /* Get the local valid value */
1808 res = block->attr.block.graph_arr[pos];
1810 /* case 2 -- If the value is actually computed, return it. */
1811 if (res) { return res; };
1813 if (block->attr.block.matured) { /* case 3 */
1815 /* The Phi has the same amount of ins as the corresponding block. */
1816 int ins = get_irn_arity(block);
1818 NEW_ARR_A (ir_node *, nin, ins);
1820 /* Phi merge collects the predecessors and then creates a node. */
1821 res = phi_merge (block, pos, mode, nin, ins);
1823 } else { /* case 1 */
1824 /* The block is not mature, we don't know how many in's are needed. A Phi
1825 with zero predecessors is created. Such a Phi node is called Phi0
1826 node. The Phi0 is then added to the list of Phi0 nodes in this block
1827 to be matured by mature_block later.
1828 The Phi0 has to remember the pos of it's internal value. If the real
1829 Phi is computed, pos is used to update the array with the local
1831 res = new_rd_Phi0 (current_ir_graph, block, mode);
1832 res->attr.phi0_pos = pos;
/* Prepend the Phi0 to the block's list of pending Phi0s (field "link"). */
1833 res->link = block->link;
1837 /* If we get here, the frontend missed a use-before-definition error */
1840 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1841 assert (mode->code >= irm_F && mode->code <= irm_P);
1842 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1843 get_mode_null(mode));
1846 /* The local valid value is available now. */
1847 block->attr.block.graph_arr[pos] = res;
1852 #endif /* USE_FAST_PHI_CONSTRUCTION */
1854 /* ************************************************************************** */
1856 /** Finalize a Block node, when all control flows are known. */
1857 /** Acceptable parameters are only Block nodes. */
1859 mature_block (ir_node *block)
1866 assert (get_irn_opcode(block) == iro_Block);
1867 /* @@@ should be commented in
1868 assert (!get_Block_matured(block) && "Block already matured"); */
1870 if (!get_Block_matured(block)) {
/* block->in[0] is the block itself, so arity excludes it. */
1871 ins = ARR_LEN (block->in)-1;
1872 /* Fix block parameters */
1873 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1875 /* An array for building the Phi nodes. */
1876 NEW_ARR_A (ir_node *, nin, ins);
1878 /* Traverse a chain of Phi nodes attached to this block and mature
1880 for (n = block->link; n; n=next) {
/* Fresh visited counter per Phi0 so phi_merge's traversal restarts. */
1881 inc_irg_visited(current_ir_graph);
1883 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1886 block->attr.block.matured = 1;
1888 /* Now, as the block is a finished firm node, we can optimize it.
1889 Since other nodes have been allocated since the block was created
1890 we can not free the node on the obstack. Therefore we have to call
1892 Unfortunately the optimization does not change a lot, as all allocated
1893 nodes refer to the unoptimized node.
1894 We can call _2, as global cse has no effect on blocks. */
1895 block = optimize_in_place_2(block);
1896 irn_vrfy_irg(block, current_ir_graph);
1901 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1903 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1908 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1910 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1915 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1917 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1923 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1925 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1930 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1932 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1937 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1940 assert(arg->op == op_Cond);
1941 arg->attr.c.kind = fragmentary;
1942 arg->attr.c.default_proj = max_proj;
1943 res = new_Proj (arg, mode_X, max_proj);
1948 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1950 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1955 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1957 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1961 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1963 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1968 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1970 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1975 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1977 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1983 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1985 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1990 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1992 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1997 * allocate the frag array
1999 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2000 if (get_opt_precise_exc_context()) {
2001 if ((current_ir_graph->phase_state == phase_building) &&
2002 (get_irn_op(res) == op) && /* Could be optimized away. */
2003 !*frag_store) /* Could be a cse where the arr is already set. */ {
2004 *frag_store = new_frag_arr(res);
2011 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2014 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2016 #if PRECISE_EXC_CONTEXT
2017 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2024 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2027 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2029 #if PRECISE_EXC_CONTEXT
2030 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2037 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2040 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2042 #if PRECISE_EXC_CONTEXT
2043 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2050 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2053 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2055 #if PRECISE_EXC_CONTEXT
2056 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
2063 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2065 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2070 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2072 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2077 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2079 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2084 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2086 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2091 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2093 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2098 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2100 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2105 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2107 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2112 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2114 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2119 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2121 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2126 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2128 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2133 new_d_Jmp (dbg_info* db)
2135 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2139 new_d_Cond (dbg_info* db, ir_node *c)
2141 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2145 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2149 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2150 store, callee, arity, in, tp);
2151 #if PRECISE_EXC_CONTEXT
2152 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
2159 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2161 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2166 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2168 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2173 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2176 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2178 #if PRECISE_EXC_CONTEXT
2179 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2186 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2189 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2191 #if PRECISE_EXC_CONTEXT
2192 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2199 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2203 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2204 store, size, alloc_type, where);
2205 #if PRECISE_EXC_CONTEXT
2206 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2213 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2215 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2216 store, ptr, size, free_type);
2220 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2221 /* GL: objptr was called frame before. Frame was a bad choice for the name
2222 as the operand could as well be a pointer to a dynamic object. */
2224 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2225 store, objptr, 0, NULL, ent);
2229 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2231 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2232 store, objptr, n_index, index, sel);
2236 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2238 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2239 store, objptr, ent));
2243 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2245 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2250 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2252 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2260 return current_ir_graph->bad;
2264 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2266 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2271 new_d_Unknown (ir_mode *m)
2273 return new_rd_Unknown(current_ir_graph, m);
2277 new_d_CallBegin (dbg_info *db, ir_node *call)
2280 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2285 new_d_EndReg (dbg_info *db)
2288 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2293 new_d_EndExcept (dbg_info *db)
2296 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2301 new_d_Break (dbg_info *db)
2303 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2307 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2309 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2314 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2318 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2319 callee, arity, in, tp);
2324 /* ********************************************************************* */
2325 /* Comfortable interface with automatic Phi node construction. */
2326 /* (Also uses constructors of the ?? interface, except new_Block.) */
2327 /* ********************************************************************* */
2329 /* * Block construction **/
2330 /* immature Block without predecessors */
/* Create a new immature (predecessor-less) Block and make it the
   current block; used by the comfortable Phi-construction interface. */
2331 ir_node *new_d_immBlock (dbg_info* db) {
2334 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2335 /* creates a new dynamic in-array as length of in is -1 */
2336 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2337 current_ir_graph->current_block = res;
2338 res->attr.block.matured = 0;
2339 /* res->attr.block.exc = exc_normal; */
2340 /* res->attr.block.handler_entry = 0; */
2341 res->attr.block.irg = current_ir_graph;
2342 res->attr.block.backedge = NULL;
2343 res->attr.block.in_cg = NULL;
2344 res->attr.block.cg_backedge = NULL;
2345 set_Block_block_visited(res, 0);
2347 /* Create and initialize array for Phi-node construction. */
2348 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2349 current_ir_graph->n_loc);
2350 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2352 /* Immature block may not be optimized! */
2353 irn_vrfy_irg (res, current_ir_graph);
2360 return new_d_immBlock(NULL);
2363 /* add an edge to a jmp/control flow node */
2365 add_in_edge (ir_node *block, ir_node *jmp)
/* Once matured, a block's predecessor list is fixed. */
2367 if (block->attr.block.matured) {
2368 assert(0 && "Error: Block already matured!\n");
2371 assert (jmp != NULL);
2372 ARR_APP1 (ir_node *, block->in, jmp);
2376 /* changing the current block */
2378 switch_block (ir_node *target)
2380 current_ir_graph->current_block = target;
2383 /* ************************ */
2384 /* parameter administration */
2386 /* get a value from the parameter array from the current block by its index */
2388 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2390 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2391 inc_irg_visited(current_ir_graph);
/* pos+1: slot 0 of graph_arr is reserved for the memory/store value. */
2393 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2395 /* get a value from the parameter array from the current block by its index */
2397 get_value (int pos, ir_mode *mode)
2399 return get_d_value(NULL, pos, mode);
2402 /* set a value at position pos in the parameter array from the current block */
2404 set_value (int pos, ir_node *value)
2406 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2407 assert(pos+1 < current_ir_graph->n_loc);
/* pos+1: slot 0 of graph_arr is reserved for the memory/store value. */
2408 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2411 /* get the current store */
2415 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2416 /* GL: one could call get_value instead */
2417 inc_irg_visited(current_ir_graph);
/* The memory/store value lives in graph_arr slot 0 with mode_M. */
2418 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2421 /* set the current store */
2423 set_store (ir_node *store)
2425 /* GL: one could call set_value instead */
2426 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* The memory/store value lives in graph_arr slot 0. */
2427 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2431 keep_alive (ir_node *ka)
2433 add_End_keepalive(current_ir_graph->end, ka);
2436 /** Useful access routines **/
2437 /* Returns the current block of the current graph. To set the current
2438 block use switch_block(). */
2439 ir_node *get_cur_block() {
2440 return get_irg_current_block(current_ir_graph);
2443 /* Returns the frame type of the current graph */
2444 type *get_cur_frame_type() {
2445 return get_irg_frame_type(current_ir_graph);
2449 /* ********************************************************************* */
2452 /* call once for each run of the library */
2454 init_cons (default_initialize_local_variable_func_t *func)
2456 default_initialize_local_variable = func;
2459 /* call for each graph */
2461 finalize_cons (ir_graph *irg) {
2462 irg->phase_state = phase_high;
2466 ir_node *new_Block(int arity, ir_node **in) {
2467 return new_d_Block(NULL, arity, in);
2469 ir_node *new_Start (void) {
2470 return new_d_Start(NULL);
2472 ir_node *new_End (void) {
2473 return new_d_End(NULL);
2475 ir_node *new_Jmp (void) {
2476 return new_d_Jmp(NULL);
2478 ir_node *new_Cond (ir_node *c) {
2479 return new_d_Cond(NULL, c);
2481 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2482 return new_d_Return(NULL, store, arity, in);
2484 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2485 return new_d_Raise(NULL, store, obj);
2487 ir_node *new_Const (ir_mode *mode, tarval *con) {
2488 return new_d_Const(NULL, mode, con);
2490 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2491 return new_d_SymConst(NULL, value, kind);
2493 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2494 return new_d_simpleSel(NULL, store, objptr, ent);
2496 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2498 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2500 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2501 return new_d_InstOf (NULL, store, objptr, ent);
2503 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2505 return new_d_Call(NULL, store, callee, arity, in, tp);
2507 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2508 return new_d_Add(NULL, op1, op2, mode);
2510 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2511 return new_d_Sub(NULL, op1, op2, mode);
2513 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2514 return new_d_Minus(NULL, op, mode);
2516 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2517 return new_d_Mul(NULL, op1, op2, mode);
2519 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2520 return new_d_Quot(NULL, memop, op1, op2);
2522 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2523 return new_d_DivMod(NULL, memop, op1, op2);
2525 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2526 return new_d_Div(NULL, memop, op1, op2);
2528 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2529 return new_d_Mod(NULL, memop, op1, op2);
2531 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2532 return new_d_Abs(NULL, op, mode);
2534 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2535 return new_d_And(NULL, op1, op2, mode);
2537 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2538 return new_d_Or(NULL, op1, op2, mode);
2540 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2541 return new_d_Eor(NULL, op1, op2, mode);
2543 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2544 return new_d_Not(NULL, op, mode);
2546 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2547 return new_d_Shl(NULL, op, k, mode);
2549 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2550 return new_d_Shr(NULL, op, k, mode);
2552 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2553 return new_d_Shrs(NULL, op, k, mode);
2555 #define new_Rotate new_Rot
2556 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2557 return new_d_Rot(NULL, op, k, mode);
2559 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2560 return new_d_Cmp(NULL, op1, op2);
2562 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2563 return new_d_Conv(NULL, op, mode);
2565 ir_node *new_Cast (ir_node *op, type *to_tp) {
2566 return new_d_Cast(NULL, op, to_tp);
2568 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2569 return new_d_Phi(NULL, arity, in, mode);
2571 ir_node *new_Load (ir_node *store, ir_node *addr) {
2572 return new_d_Load(NULL, store, addr);
2574 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2575 return new_d_Store(NULL, store, addr, val);
2577 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2578 where_alloc where) {
2579 return new_d_Alloc(NULL, store, size, alloc_type, where);
2581 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2583 return new_d_Free(NULL, store, ptr, size, free_type);
2585 ir_node *new_Sync (int arity, ir_node **in) {
2586 return new_d_Sync(NULL, arity, in);
2588 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2589 return new_d_Proj(NULL, arg, mode, proj);
2591 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2592 return new_d_defaultProj(NULL, arg, max_proj);
2594 ir_node *new_Tuple (int arity, ir_node **in) {
2595 return new_d_Tuple(NULL, arity, in);
2597 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2598 return new_d_Id(NULL, val, mode);
2600 ir_node *new_Bad (void) {
2603 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2604 return new_d_Confirm (NULL, val, bound, cmp);
2606 ir_node *new_Unknown(ir_mode *m) {
2607 return new_d_Unknown(m);
2609 ir_node *new_CallBegin (ir_node *callee) {
2610 return new_d_CallBegin(NULL, callee);
2612 ir_node *new_EndReg (void) {
2613 return new_d_EndReg(NULL);
2615 ir_node *new_EndExcept (void) {
2616 return new_d_EndExcept(NULL);
2618 ir_node *new_Break (void) {
2619 return new_d_Break(NULL);
2621 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2622 return new_d_Filter(NULL, arg, mode, proj);
2624 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2625 return new_d_FuncCall(NULL, callee, arity, in, tp);