3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
/* Node-verification hook: one definition compiles the check out entirely,
   the other forwards to irn_vrfy_irg().  NOTE(review): the #if/#else/#endif
   lines selecting between the two definitions are elided from this listing. */
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependent initialization variable
/* Front-end callback producing a value for an uninitialized local variable;
   NULL means no language-specific initialization is installed. */
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
/* NOTE(review): this listing is line-sampled; return type, local
   declarations, the return statement and closing brace are elided. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A block built through this raw constructor is already complete:
   mark it mature and reset the visited counter. */
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
/* Back-edge flags live on the graph's obstack; interprocedural (cg)
   predecessor info starts out empty. */
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T, no predecessors) in the given block. */
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
/* Constructs an End node.  Arity -1 requests a dynamic in-array so
   keep-alive edges can be appended later. */
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its arity must match the Phi's arity. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; the loop body that sets
   has_unknown is elided from this sampled listing. */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
/* Do not optimize a Phi that still has Unknown inputs. */
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* Constructs a typed Const node.  NOTE(review): the 'block' parameter is
   ignored -- the node is always placed in the graph's start block (see the
   new_ir_node call below). */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
/* Constructs a Const node with unknown_type; delegates to new_rd_Const_type. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node (identity forwarding of 'val' in the given mode). */
162 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
166 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
167 res = optimize_node(res);
168 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting one result of the (mode_T) node 'arg'.
   The 'proj' number parameter is in the elided part of the signature. */
173 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
178 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
179 res->attr.proj = proj;
/* Sanity: a Proj must have a predecessor that itself lives in a block. */
182 assert(get_Proj_pred(res));
183 assert(get_nodes_Block(get_Proj_pred(res)));
185 res = optimize_node(res);
187 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond node.  Note: this MUTATES the
   Cond 'arg' -- it marks its jump table fragmentary and records the
   default projection number before building the mode_X Proj. */
193 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
197 assert(arg->op == op_Cond);
198 arg->attr.c.kind = fragmentary;
199 arg->attr.c.default_proj = max_proj;
200 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Mode conversion: Conv changes the mode, Cast changes only the type
   (it keeps the operand's mode, see get_irn_mode(op) below), Tuple
   bundles several values into one mode_T node. */
205 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
210 res = optimize_node(res);
211 IRN_VRFY_IRG(res, irg);
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
220 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
221 res->attr.cast.totype = to_tp;
222 res = optimize_node(res);
223 IRN_VRFY_IRG(res, irg);
228 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
232 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
233 res = optimize_node (res);
234 IRN_VRFY_IRG(res, irg);
/* Arithmetic constructors.  Pure two-operand ops (Add, Sub, Mul) produce a
   value in 'mode'; Minus is unary.  The dividing ops (Quot, DivMod, Div,
   Mod) additionally take a memory operand and yield mode_T tuples (memory
   result + data result(s)).  The local 'in[]' initialization lines are
   elided from this sampled listing. */
239 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
240 ir_node *op1, ir_node *op2, ir_mode *mode)
247 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
248 res = optimize_node(res);
249 IRN_VRFY_IRG(res, irg);
254 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
255 ir_node *op1, ir_node *op2, ir_mode *mode)
262 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
263 res = optimize_node (res);
264 IRN_VRFY_IRG(res, irg);
269 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
270 ir_node *op, ir_mode *mode)
274 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
275 res = optimize_node(res);
276 IRN_VRFY_IRG(res, irg);
281 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
282 ir_node *op1, ir_node *op2, ir_mode *mode)
289 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
290 res = optimize_node(res);
291 IRN_VRFY_IRG(res, irg);
296 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
297 ir_node *memop, ir_node *op1, ir_node *op2)
305 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
306 res = optimize_node(res);
307 IRN_VRFY_IRG(res, irg);
312 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
313 ir_node *memop, ir_node *op1, ir_node *op2)
321 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
322 res = optimize_node(res);
323 IRN_VRFY_IRG(res, irg);
328 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *memop, ir_node *op1, ir_node *op2)
337 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
338 res = optimize_node(res);
339 IRN_VRFY_IRG(res, irg);
344 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
345 ir_node *memop, ir_node *op1, ir_node *op2)
353 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
354 res = optimize_node(res);
355 IRN_VRFY_IRG(res, irg);
/* Bitwise/logical constructors: And, Or, Eor (exclusive or) are binary,
   Not is unary.  All follow the same build/optimize/verify pattern. */
360 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
368 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
369 res = optimize_node(res);
370 IRN_VRFY_IRG(res, irg);
375 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
376 ir_node *op1, ir_node *op2, ir_mode *mode)
383 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
384 res = optimize_node(res);
385 IRN_VRFY_IRG(res, irg);
390 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
391 ir_node *op1, ir_node *op2, ir_mode *mode)
398 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
399 res = optimize_node (res);
400 IRN_VRFY_IRG(res, irg);
405 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
406 ir_node *op, ir_mode *mode)
410 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
/* Shift/rotate constructors: operand 'op' shifted/rotated by amount 'k'.
   Shl = left shift, Shr = logical right shift, Shrs = arithmetic
   (sign-preserving) right shift, Rot = rotation. */
417 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_node *k, ir_mode *mode)
425 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
433 ir_node *op, ir_node *k, ir_mode *mode)
440 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
447 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
448 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
470 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
/* Abs: unary absolute value.  Cmp: comparison producing a mode_T node
   whose Projs carry the individual relations. */
477 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
478 ir_node *op, ir_mode *mode)
482 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
483 res = optimize_node (res);
484 IRN_VRFY_IRG(res, irg);
489 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *op1, ir_node *op2)
497 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
498 res = optimize_node(res);
499 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow (mode_X, no operands).
   Cond: conditional branch on 'c'; initialized as a dense jump table
   with default projection 0. */
504 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
508 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
509 res = optimize_node (res);
510 IRN_VRFY_IRG (res, irg);
515 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
519 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
520 res->attr.c.kind = dense;
521 res->attr.c.default_proj = 0;
522 res = optimize_node (res);
523 IRN_VRFY_IRG(res, irg);
/* Constructs a Call: predecessors are [store, callee, arguments...], hence
   the arguments are copied starting at r_in[2].  The lines computing
   r_arity and filling r_in[0]/r_in[1] are elided from this listing. */
528 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
529 ir_node *callee, int arity, ir_node **in, type *tp)
536 NEW_ARR_A(ir_node *, r_in, r_arity);
539 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
541 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type; stored via accessor. */
543 assert(is_method_type(tp));
544 set_Call_type(res, tp);
545 res->attr.call.callee_arr = NULL;
546 res = optimize_node(res);
547 IRN_VRFY_IRG(res, irg);
/* Return: predecessors are [store, results...] (results copied to r_in[1]).
   Raise: exception raise taking a store and the exception object. */
552 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
553 ir_node *store, int arity, ir_node **in)
560 NEW_ARR_A (ir_node *, r_in, r_arity);
562 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
563 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
570 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
577 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
578 res = optimize_node(res);
579 IRN_VRFY_IRG(res, irg);
/* Load: [store, address] -> mode_T tuple; the loaded value's mode is kept
   in the load_mode attribute.  Store: [store, address, value] -> mode_T. */
584 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
585 ir_node *store, ir_node *adr, ir_mode *mode)
592 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
593 res->attr.load.load_mode = mode;
594 res = optimize_node(res);
595 IRN_VRFY_IRG(res, irg);
600 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
601 ir_node *store, ir_node *adr, ir_node *val)
609 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
610 res = optimize_node(res);
611 IRN_VRFY_IRG(res, irg);
/* Alloc: allocates 'size' units of 'alloc_type' on stack or heap ('where');
   Free: releases 'ptr' of 'free_type'.  Both thread the memory state. */
616 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
617 ir_node *size, type *alloc_type, where_alloc where)
624 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
625 res->attr.a.where = where;
626 res->attr.a.type = alloc_type;
627 res = optimize_node(res);
628 IRN_VRFY_IRG(res, irg);
633 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
634 ir_node *ptr, ir_node *size, type *free_type)
642 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
643 res->attr.f = free_type;
644 res = optimize_node(res);
645 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel node: selects entity 'ent' from the compound object at
   'objptr', with optional array indices in 'in'.  Predecessors are
   [store, objptr, indices...], hence the copy into r_in[2]. */
650 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
651 int arity, ir_node **in, entity *ent)
657 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
660 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
663 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
664 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
665 res->attr.s.ent = ent;
666 res = optimize_node(res);
667 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (dynamic type test) node.
   NOTE(review): the node is built with op_Sel but its attributes are set
   through attr.io -- this looks inconsistent (op_InstOf would be expected
   here); confirm against the op definitions before relying on it. */
672 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
673 ir_node *objptr, type *ent)
680 NEW_ARR_A(ir_node *, r_in, r_arity);
684 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
685 res->attr.io.ent = ent;
/* Optimization deliberately disabled for this node kind. */
687 /* res = optimize(res); */
688 IRN_VRFY_IRG(res, irg);
/* Constructs a typed SymConst.  The mode depends on the kind: address
   kinds (addr_name/addr_ent) get a pointer mode; the branch assigning
   'mode' is partially elided from this listing. */
693 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
694 symconst_kind symkind, type *tp)
699 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
703 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
705 res->attr.i.num = symkind;
706 res->attr.i.sym = value;
709 res = optimize_node(res);
710 IRN_VRFY_IRG(res, irg);
/* SymConst convenience wrappers.  The kind-specific variants all place the
   node in the graph's start block and forward to new_rd_SymConst_type. */
715 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
716 symconst_kind symkind)
718 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
722 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
723 symconst_symbol sym = {(type *)symbol};
724 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
727 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
728 symconst_symbol sym = {(type *)symbol};
729 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
732 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
733 symconst_symbol sym = {symbol};
734 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
737 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
738 symconst_symbol sym = {symbol};
739 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Sync: merges several memory states (mode_M).
   Bad: the graph's bad node (body elided here).
   Confirm: attaches a comparison fact 'val cmp bound' to a value.
   Unknown: placeholder value, always placed in the start block. */
743 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
747 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
748 res = optimize_node(res);
749 IRN_VRFY_IRG(res, irg);
754 new_rd_Bad (ir_graph *irg)
760 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
762 ir_node *in[2], *res;
766 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
767 res->attr.confirm_cmp = cmp;
768 res = optimize_node (res);
769 IRN_VRFY_IRG(res, irg);
774 new_rd_Unknown (ir_graph *irg, ir_mode *m)
776 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Interprocedural-view constructors: CallBegin marks the start of a call,
   EndReg/EndExcept are regular/exceptional procedure ends (dynamic arity,
   not optimized), Break is an interprocedural jump. */
780 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
785 in[0] = get_Call_ptr(call);
786 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
787 /* res->attr.callbegin.irg = irg; */
788 res->attr.callbegin.call = call;
789 res = optimize_node(res);
790 IRN_VRFY_IRG(res, irg);
795 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
799 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
801 IRN_VRFY_IRG(res, irg);
806 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
810 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
/* The graph remembers its exceptional end node. */
811 irg->end_except = res;
812 IRN_VRFY_IRG (res, irg);
817 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
821 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter node (interprocedural counterpart of Proj);
   the 'proj' parameter is in the elided part of the signature. */
828 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
833 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
834 res->attr.filter.proj = proj;
835 res->attr.filter.in_cg = NULL;
836 res->attr.filter.backedge = NULL;
/* Same sanity checks as for Proj. */
839 assert(get_Proj_pred(res));
840 assert(get_nodes_Block(get_Proj_pred(res)));
842 res = optimize_node(res);
843 IRN_VRFY_IRG(res, irg);
/* Constructs a FuncCall (call without memory dependency): predecessors are
   [callee, arguments...] -- no store operand, hence the copy into r_in[1]
   (unlike new_rd_Call, which copies into r_in[2]). */
849 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
850 ir_node *callee, int arity, ir_node **in, type *tp)
857 NEW_ARR_A(ir_node *, r_in, r_arity);
859 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
861 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
863 assert(is_method_type(tp));
864 set_FuncCall_type(res, tp);
865 res->attr.call.callee_arr = NULL;
866 res = optimize_node(res);
867 IRN_VRFY_IRG(res, irg);
/* new_r_* convenience wrappers: identical to the new_rd_* constructors but
   without debug info (db == NULL).  Pure one-line forwarders. */
872 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
873 return new_rd_Block(NULL, irg, arity, in);
875 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
876 return new_rd_Start(NULL, irg, block);
878 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
879 return new_rd_End(NULL, irg, block);
881 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
882 return new_rd_Jmp(NULL, irg, block);
884 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
885 return new_rd_Cond(NULL, irg, block, c);
887 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
888 ir_node *store, int arity, ir_node **in) {
889 return new_rd_Return(NULL, irg, block, store, arity, in);
891 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
892 ir_node *store, ir_node *obj) {
893 return new_rd_Raise(NULL, irg, block, store, obj);
895 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
896 ir_mode *mode, tarval *con) {
897 return new_rd_Const(NULL, irg, block, mode, con);
899 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
900 symconst_symbol value, symconst_kind symkind) {
901 return new_rd_SymConst(NULL, irg, block, value, symkind);
903 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
904 ir_node *objptr, int n_index, ir_node **index,
906 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
908 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
910 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
912 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
913 ir_node *callee, int arity, ir_node **in,
915 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
917 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
918 ir_node *op1, ir_node *op2, ir_mode *mode) {
919 return new_rd_Add(NULL, irg, block, op1, op2, mode);
921 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
922 ir_node *op1, ir_node *op2, ir_mode *mode) {
923 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
925 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
926 ir_node *op, ir_mode *mode) {
927 return new_rd_Minus(NULL, irg, block, op, mode);
929 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
930 ir_node *op1, ir_node *op2, ir_mode *mode) {
931 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
/* new_r_* wrappers, continued (no debug info; forward to new_rd_*). */
933 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
934 ir_node *memop, ir_node *op1, ir_node *op2) {
935 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
937 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
938 ir_node *memop, ir_node *op1, ir_node *op2) {
939 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
941 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
942 ir_node *memop, ir_node *op1, ir_node *op2) {
943 return new_rd_Div(NULL, irg, block, memop, op1, op2);
945 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
946 ir_node *memop, ir_node *op1, ir_node *op2) {
947 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
949 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
950 ir_node *op, ir_mode *mode) {
951 return new_rd_Abs(NULL, irg, block, op, mode);
953 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
954 ir_node *op1, ir_node *op2, ir_mode *mode) {
955 return new_rd_And(NULL, irg, block, op1, op2, mode);
957 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
958 ir_node *op1, ir_node *op2, ir_mode *mode) {
959 return new_rd_Or(NULL, irg, block, op1, op2, mode);
961 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
962 ir_node *op1, ir_node *op2, ir_mode *mode) {
963 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
965 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
966 ir_node *op, ir_mode *mode) {
967 return new_rd_Not(NULL, irg, block, op, mode);
969 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
970 ir_node *op1, ir_node *op2) {
971 return new_rd_Cmp(NULL, irg, block, op1, op2);
973 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
974 ir_node *op, ir_node *k, ir_mode *mode) {
975 return new_rd_Shl(NULL, irg, block, op, k, mode);
977 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
978 ir_node *op, ir_node *k, ir_mode *mode) {
979 return new_rd_Shr(NULL, irg, block, op, k, mode);
981 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
982 ir_node *op, ir_node *k, ir_mode *mode) {
983 return new_rd_Shrs(NULL, irg, block, op, k, mode);
985 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
986 ir_node *op, ir_node *k, ir_mode *mode) {
987 return new_rd_Rot(NULL, irg, block, op, k, mode);
989 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
990 ir_node *op, ir_mode *mode) {
991 return new_rd_Conv(NULL, irg, block, op, mode);
993 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
994 return new_rd_Cast(NULL, irg, block, op, to_tp);
996 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
997 ir_node **in, ir_mode *mode) {
998 return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* new_r_* wrappers, continued (no debug info; forward to new_rd_*). */
1000 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1001 ir_node *store, ir_node *adr, ir_mode *mode) {
1002 return new_rd_Load(NULL, irg, block, store, adr, mode);
1004 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1005 ir_node *store, ir_node *adr, ir_node *val) {
1006 return new_rd_Store(NULL, irg, block, store, adr, val);
1008 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1009 ir_node *size, type *alloc_type, where_alloc where) {
1010 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1012 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1013 ir_node *ptr, ir_node *size, type *free_type) {
1014 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1016 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1017 return new_rd_Sync(NULL, irg, block, arity, in);
1019 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1020 ir_mode *mode, long proj) {
1021 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1023 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1025 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1027 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1028 int arity, ir_node **in) {
1029 return new_rd_Tuple(NULL, irg, block, arity, in );
1031 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1032 ir_node *val, ir_mode *mode) {
1033 return new_rd_Id(NULL, irg, block, val, mode);
1035 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1036 return new_rd_Bad(irg);
1038 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1039 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1041 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1042 return new_rd_Unknown(irg, m);
1044 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1045 return new_rd_CallBegin(NULL, irg, block, callee);
1047 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1048 return new_rd_EndReg(NULL, irg, block);
1050 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1051 return new_rd_EndExcept(NULL, irg, block);
1053 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1054 return new_rd_Break(NULL, irg, block);
1056 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1057 ir_mode *mode, long proj) {
1058 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1060 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1061 ir_node *callee, int arity, ir_node **in,
1063 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1067 /** ********************/
1068 /** public interfaces */
1069 /** construction tools */
1073 * - create a new Start node in the current block
1075 * @return s - pointer to the created Start node
/* new_d_* constructors operate on current_ir_graph / its current block
   (the implicit-graph front-end interface). */
1080 new_d_Start (dbg_info* db)
1084 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1085 op_Start, mode_T, 0, NULL);
1086 /* res->attr.start.irg = current_ir_graph; */
1088 res = optimize_node(res);
1089 IRN_VRFY_IRG(res, current_ir_graph);
/* End node in the current block of current_ir_graph (dynamic arity). */
1094 new_d_End (dbg_info* db)
1097 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1098 op_End, mode_X, -1, NULL);
1099 res = optimize_node(res);
1100 IRN_VRFY_IRG(res, current_ir_graph);
1105 /* Constructs a Block with a fixed number of predecessors.
1106 Does set current_block. Can be used with automatic Phi
1107 node construction. */
1109 new_d_Block (dbg_info* db, int arity, ir_node **in)
1113 bool has_unknown = false;
1115 res = new_rd_Block(db, current_ir_graph, arity, in);
1117 /* Create and initialize array for Phi-node construction. */
1118 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr holds the current value of each of the n_loc local
   variables within this block; initially all undefined (NULL). */
1119 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1120 current_ir_graph->n_loc);
1121 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes (loop body elided in this
   sampled listing); such a block must not be optimized yet. */
1124 for (i = arity-1; i >= 0; i--)
1125 if (get_irn_op(in[i]) == op_Unknown) {
1130 if (!has_unknown) res = optimize_node(res);
/* This constructor makes the new block the current block. */
1131 current_ir_graph->current_block = res;
1133 IRN_VRFY_IRG(res, current_ir_graph);
1138 /* ***********************************************************************/
1139 /* Methods necessary for automatic Phi node creation */
1141 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1142 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1143 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1144 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1146 Call Graph: ( A ---> B == A "calls" B)
1148 get_value mature_immBlock
1156 get_r_value_internal |
1160 new_rd_Phi0 new_rd_Phi_in
1162 * *************************************************************************** */
1164 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi used during construction of immature blocks; it is
   built directly (no optimize_node) so it can be upgraded later. */
1165 static INLINE ir_node *
1166 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1170 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1171 IRN_VRFY_IRG(res, irg);
1175 /* There are two implementations of the Phi node construction. The first
1176 is faster, but does not work for blocks with more than 2 predecessors.
1177 The second works always but is slower and causes more unnecessary Phi
1179 Select the implementations by the following preprocessor flag set in
1181 #if USE_FAST_PHI_CONSTRUCTION
1183 /* This is a stack used for allocating and deallocating nodes in
1184 new_rd_Phi_in. The original implementation used the obstack
1185 to model this stack, now it is explicit. This reduces side effects.
1187 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate a fresh, empty Phi_in stack (growable flexible array). */
1188 INLINE Phi_in_stack *
1189 new_Phi_in_stack(void) {
1192 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1194 res->stack = NEW_ARR_F (ir_node *, 0);
/* Release the stack's array (the free of 's' itself is elided here). */
1201 free_Phi_in_stack(Phi_in_stack *s) {
1202 DEL_ARR_F(s->stack)
1206 free_to_Phi_in_stack(ir_node *phi) {
/* Push 'phi' for reuse: append when the slot at 'pos' does not exist
   yet, otherwise overwrite it; then advance the stack pointer. */
1207 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1208 current_ir_graph->Phi_in_stack->pos)
1209 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1211 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1213 (current_ir_graph->Phi_in_stack->pos)++;
/* Either pops a previously freed Phi node from the Phi_in stack and
   re-initializes it, or allocates a brand-new one when the stack is empty. */
1216 static INLINE ir_node *
1217 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1218 int arity, ir_node **in) {
1220 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1221 int pos = current_ir_graph->Phi_in_stack->pos;
1225 /* We need to allocate a new node */
/* NOTE(review): 'db' has no visible declaration in this sampled view --
   presumably declared (or NULL) in the elided lines; confirm. */
1226 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1227 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1229 /* reuse the old node and initialize it again. */
1232 assert (res->kind == k_ir_node);
1233 assert (res->op == op_Phi);
1237 assert (arity >= 0);
1238 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer; operands start at in[1]. */
1239 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1241 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
/* Popped one node off the reuse stack. */
1243 (current_ir_graph->Phi_in_stack->pos)--;
1247 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1249 /* Creates a Phi node with a given, fixed array **in of predecessors.
1250 If the Phi node is unnecessary, as the same value reaches the block
1251 through all control flow paths, it is eliminated and the value
1252 returned directly. This constructor is only intended for use in
1253 the automatic Phi node generation triggered by get_value or mature.
1254 The implementation is quite tricky and depends on the fact, that
1255 the nodes are allocated on a stack:
1256 The in array contains predecessors and NULLs. The NULLs appear,
1257 if get_r_value_internal, that computed the predecessors, reached
1258 the same block on two paths. In this case the same value reaches
1259 this block on both paths, there is no definition in between. We need
1260 not allocate a Phi where these path's merge, but we have to communicate
1261 this fact to the caller. This happens by returning a pointer to the
1262 node the caller _will_ allocate. (Yes, we predict the address. We can
1263 do so because the nodes are allocated on the obstack.) The caller then
1264 finds a pointer to itself and, when this routine is called again,
1267 static INLINE ir_node *
1268 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1271 ir_node *res, *known;
1273 /* Allocate a new node on the obstack. This can return a node to
1274 which some of the pointers in the in-array already point.
1275 Attention: the constructor copies the in array, i.e., the later
1276 changes to the array in this routine do not affect the
1277 constructed node! If the in array contains NULLs, there will be
1278 missing predecessors in the returned node. Is this a possible
1279 internal state of the Phi node generation? */
1280 #if USE_EXPLICIT_PHI_IN_STACK
1281 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1283 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1284 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1287 /* The in-array can contain NULLs. These were returned by
1288 get_r_value_internal if it reached the same block/definition on a
1289 second path. The NULLs are replaced by the node itself to
1290 simplify the test in the next loop. */
1291 for (i = 0; i < ins; ++i) {
1296 /* This loop checks whether the Phi has more than one predecessor.
1297 If so, it is a real Phi node and we break the loop. Else the Phi
1298 node merges the same definition on several paths and therefore is
1300 for (i = 0; i < ins; ++i)
1302 if (in[i] == res || in[i] == known) continue;
1310 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Redundant Phi: give the node back (to the reuse stack or straight to
   the obstack) and fall through to return the single known value. */
1312 #if USE_EXPLICIT_PHI_IN_STACK
1313 free_to_Phi_in_stack(res);
1315 obstack_free (current_ir_graph->obst, res);
1319 res = optimize_node (res);
1320 IRN_VRFY_IRG(res, irg);
1323 /* return the pointer to the Phi node. This node might be deallocated! */
1328 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1331 allocates and returns this node. The routine called to allocate the
1332 node might optimize it away and return a real value, or even a pointer
1333 to a deallocated Phi node on top of the obstack!
1334 This function is called with an in-array of proper size. **/
/* Collects the Phi operands for local variable 'pos' from all predecessor
   blocks of 'block' into nin[ins] and builds the Phi via new_rd_Phi_in. */
1336 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1338 ir_node *prevBlock, *res;
1341 /* This loop goes to all predecessor blocks of the block the Phi node is in
1342 and there finds the operands of the Phi node by calling
1343 get_r_value_internal. */
1344 for (i = 1; i <= ins; ++i) {
1345 assert (block->in[i]);
1346 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1348 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1351 /* After collecting all predecessors into the array nin a new Phi node
1352 with these predecessors is created. This constructor contains an
1353 optimization: If all predecessors of the Phi node are identical it
1354 returns the only operand instead of a new Phi node. If the value
1355 passes two different control flow edges without being defined, and
1356 this is the second path treated, a pointer to the node that will be
1357 allocated for the first path (recursion) is returned. We already
1358 know the address of this node, as it is the next node to be allocated
1359 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1360 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1362 /* Now we know the value for "pos" and can enter it in the array with
1363 all known local variables. Attention: this might be a pointer to
1364 a node, that later will be allocated!!! See new_rd_Phi_in.
1365 If this is called in mature, after some set_value in the same block,
1366 the proper value must not be overwritten:
1368 get_value (makes Phi0, puts it into graph_arr)
1369 set_value (overwrites Phi0 in graph_arr)
1370 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the merged value if no later set_value already did. */
1373 if (!block->attr.block.graph_arr[pos]) {
1374 block->attr.block.graph_arr[pos] = res;
1376 /* printf(" value already computed by %s\n",
1377 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1383 /* This function returns the last definition of a variable. In case
1384 this variable was last defined in a previous block, Phi nodes are
1385 inserted. If the part of the firm graph containing the definition
1386 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): return type, `ir_node *res;' declaration and several
   braces were elided in this extract; code below byte-identical. */
1388 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1391 /* There are 4 cases to treat.
1393 1. The block is not mature and we visit it the first time. We can not
1394 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1395 predecessors is returned. This node is added to the linked list (field
1396 "link") of the containing block to be completed when this block is
1397 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1400 2. The value is already known in this block, graph_arr[pos] is set and we
1401 visit the block the first time. We can return the value without
1402 creating any new nodes.
1404 3. The block is mature and we visit it the first time. A Phi node needs
1405 to be created (phi_merge). If the Phi is not needed, as all it's
1406 operands are the same value reaching the block through different
1407 paths, it's optimized away and the value itself is returned.
1409 4. The block is mature, and we visit it the second time. Now two
1410 subcases are possible:
1411 * The value was computed completely the last time we were here. This
1412 is the case if there is no loop. We can return the proper value.
1413 * The recursion that visited this node and set the flag did not
1414 return yet. We are computing a value in a loop and need to
1415 break the recursion without knowing the result yet.
1416 @@@ strange case. Straight forward we would create a Phi before
1417 starting the computation of it's predecessors. In this case we will
1418 find a Phi here in any case. The problem is that this implementation
1419 only creates a Phi after computing the predecessors, so that it is
1420 hard to compute self references of this Phi. @@@
1421 There is no simple check for the second subcase. Therefore we check
1422 for a second visit and treat all such cases as the second subcase.
1423 Anyways, the basic situation is the same: we reached a block
1424 on two paths without finding a definition of the value: No Phi
1425 nodes are needed on both paths.
1426 We return this information "Two paths, no Phi needed" by a very tricky
1427 implementation that relies on the fact that an obstack is a stack and
1428 will return a node with the same address on different allocations.
1429 Look also at phi_merge and new_rd_phi_in to understand this.
1430 @@@ Unfortunately this does not work, see testprogram
1431 three_cfpred_example.
1435 /* case 4 -- already visited. */
/* NULL signals "second path, no definition found" to new_rd_Phi_in */
1436 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1438 /* visited the first time */
1439 set_irn_visited(block, get_irg_visited(current_ir_graph));
1441 /* Get the local valid value */
1442 res = block->attr.block.graph_arr[pos];
1444 /* case 2 -- If the value is actually computed, return it. */
1445 if (res) return res;
1447 if (block->attr.block.matured) { /* case 3 */
1449 /* The Phi has the same amount of ins as the corresponding block. */
1450 int ins = get_irn_arity(block);
1452 NEW_ARR_A (ir_node *, nin, ins);
1454 /* Phi merge collects the predecessors and then creates a node. */
1455 res = phi_merge (block, pos, mode, nin, ins);
1457 } else { /* case 1 */
1458 /* The block is not mature, we don't know how many in's are needed. A Phi
1459 with zero predecessors is created. Such a Phi node is called Phi0
1460 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1461 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1463 The Phi0 has to remember the pos of it's internal value. If the real
1464 Phi is computed, pos is used to update the array with the local
1467 res = new_rd_Phi0 (current_ir_graph, block, mode);
1468 res->attr.phi0_pos = pos;
/* thread the Phi0 into the block's singly linked list of open Phi0s */
1469 res->link = block->link;
1473 /* If we get here, the frontend missed a use-before-definition error */
1476 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1477 assert (mode->code >= irm_F && mode->code <= irm_P);
1478 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1479 tarval_mode_null[mode->code]);
1482 /* The local valid value is available now. */
1483 block->attr.block.graph_arr[pos] = res;
1491 it starts the recursion. This causes an Id at the entry of
1492 every block that has no definition of the value! **/
1494 #if USE_EXPLICIT_PHI_IN_STACK
/* Stubs: the fast construction algorithm does not use the Phi-in stack. */
1496 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1497 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Fast-construction variant of new_rd_Phi_in: additionally takes `phi0',
   the placeholder Phi0 this Phi will replace, so self references through
   the Phi0 can already be folded here.  NOTE(review): lines elided in
   this extract (`int i;', braces, #else branches); code byte-identical. */
1500 static INLINE ir_node *
1501 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1502 ir_node **in, int ins, ir_node *phi0)
1505 ir_node *res, *known;
1507 /* Allocate a new node on the obstack. The allocation copies the in
1509 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1510 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1512 /* This loop checks whether the Phi has more than one predecessor.
1513 If so, it is a real Phi node and we break the loop. Else the
1514 Phi node merges the same definition on several paths and therefore
1515 is not needed. Don't consider Bad nodes! */
1517 for (i=0; i < ins; ++i)
1521 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1523 /* Optimize self referencing Phis: We can't detect them yet properly, as
1524 they still refer to the Phi0 they will replace. So replace right now. */
1525 if (phi0 && in[i] == phi0) in[i] = res;
1527 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1535 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* res is the newest obstack allocation, so it can be freed in place */
1538 obstack_free (current_ir_graph->obst, res);
1539 if (is_Phi(known)) {
1540 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1541 order, an enclosing Phi node may get superfluous. */
1542 res = optimize_in_place_2(known);
1543 if (res != known) { exchange(known, res); }
1548 /* A undefined value, e.g., in unreachable code. */
1552 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1553 IRN_VRFY_IRG(res, irg);
1554 /* Memory Phis in endless loops must be kept alive.
1555 As we can't distinguish these easily we keep all of them alive. */
1556 if ((res->op == op_Phi) && (mode == mode_M))
1557 add_End_keepalive(irg->end, res);
/* Forward declarations for the fast (PRECISE_EXC_CONTEXT) variant. */
1564 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1566 #if PRECISE_EXC_CONTEXT
1568 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1570 /* Construct a new frag_array for node n.
1571 Copy the content from the current graph_arr of the corresponding block:
1572 this is the current state.
1573 Set ProjM(n) as current memory state.
1574 Further the last entry in frag_arr of current block points to n. This
1575 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): declarations of `arr'/`opt', the else branch that restores
   the optimization flag and the return were elided in this extract. */
1577 static INLINE ir_node ** new_frag_arr (ir_node *n)
1582 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1583 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1584 sizeof(ir_node *)*current_ir_graph->n_loc);
1586 /* turn off optimization before allocating Proj nodes, as res isn't
1588 opt = get_opt_optimize(); set_optimize(0);
1589 /* Here we rely on the fact that all frag ops have Memory as first result! */
1590 if (get_irn_op(n) == op_Call)
1591 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* all other fragile ops share one memory Proj number -- assert that */
1593 assert((pn_Quot_M == pn_DivMod_M) &&
1594 (pn_Quot_M == pn_Div_M) &&
1595 (pn_Quot_M == pn_Mod_M) &&
1596 (pn_Quot_M == pn_Load_M) &&
1597 (pn_Quot_M == pn_Store_M) &&
1598 (pn_Quot_M == pn_Alloc_M) );
1599 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* remember n as the block's last fragile op (chain head, see above) */
1603 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1608 * returns the frag_arr from a node
/* Dispatches on the opcode to the attribute union member that holds the
   frag_arr.  NOTE(review): the `case iro_Call:' / `iro_Alloc' / `iro_Load'
   / `iro_Store' / default labels were elided in this extract -- the order
   of the returns below matches that presumed label order; confirm against
   the full source. */
1610 static INLINE ir_node **
1611 get_frag_arr (ir_node *n) {
1612 switch (get_irn_opcode(n)) {
1614 return n->attr.call.frag_arr;
1616 return n->attr.a.frag_arr;
1618 return n->attr.load.frag_arr;
1620 return n->attr.store.frag_arr;
1622 return n->attr.except.frag_arr;
/* Writes `val' at `pos' into frag_arr (without overwriting an existing
   entry) and propagates it along the chain of fragile ops.
   NOTE(review): this extract shows BOTH a recursive version (lines
   1629-1633) and an iteration-bounded version (1638-1649); an #if/#else
   separating them was presumably elided -- confirm against full source. */
1627 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1629 if (!frag_arr[pos]) frag_arr[pos] = val;
1630 if (frag_arr[current_ir_graph->n_loc - 1]) {
1631 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1632 assert(arr != frag_arr && "Endless recursion detected");
1633 set_frag_value(arr, pos, val);
/* iterative variant: bounded walk instead of recursion */
1638 for (i = 0; i < 1000; ++i) {
1639 if (!frag_arr[pos]) {
1640 frag_arr[pos] = val;
1642 if (frag_arr[current_ir_graph->n_loc - 1]) {
1643 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1649 assert(0 && "potential endless recursion");
/* Returns the value at `pos' as seen right after the fragile op `cfOp',
   consulting cfOp's frag_arr first and falling back to the normal
   get_r_value_internal.  NOTE(review): return type, declarations of
   `res'/`frag_arr'/`nin' and some braces were elided in this extract. */
1654 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1658 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1660 frag_arr = get_frag_arr(cfOp);
1661 res = frag_arr[pos];
1663 if (block->attr.block.graph_arr[pos]) {
1664 /* There was a set_value after the cfOp and no get_value before that
1665 set_value. We must build a Phi node now. */
1666 if (block->attr.block.matured) {
1667 int ins = get_irn_arity(block);
1669 NEW_ARR_A (ir_node *, nin, ins);
1670 res = phi_merge(block, pos, mode, nin, ins);
/* block not mature: fall back to a Phi0 placeholder, as in case 1 */
1672 res = new_rd_Phi0 (current_ir_graph, block, mode);
1673 res->attr.phi0_pos = pos;
1674 res->link = block->link;
1678 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1679 but this should be better: (remove comment if this works) */
1680 /* It's a Phi, we can write this into all graph_arrs with NULL */
1681 set_frag_value(block->attr.block.graph_arr, pos, res);
1683 res = get_r_value_internal(block, pos, mode);
1684 set_frag_value(block->attr.block.graph_arr, pos, res);
1692 computes the predecessors for the real phi node, and then
1693 allocates and returns this node. The routine called to allocate the
1694 node might optimize it away and return a real value.
1695 This function must be called with an in-array of proper size. **/
/* Fast-construction variant of phi_merge: pre-plants a Phi0 in graph_arr
   to break recursion, then collects operands, handling Bad predecessors
   and precise-exception frag values.  NOTE(review): return type, `int i;',
   else branches and closing braces were elided in this extract. */
1697 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1699 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1702 /* If this block has no value at pos create a Phi0 and remember it
1703 in graph_arr to break recursions.
1704 Else we may not set graph_arr as there a later value is remembered. */
1706 if (!block->attr.block.graph_arr[pos]) {
1707 if (block == get_irg_start_block(current_ir_graph)) {
1708 /* Collapsing to Bad tarvals is no good idea.
1709 So we call a user-supplied routine here that deals with this case as
1710 appropriate for the given language. Sorryly the only help we can give
1711 here is the position.
1713 Even if all variables are defined before use, it can happen that
1714 we get to the start block, if a cond has been replaced by a tuple
1715 (bad, jmp). In this case we call the function needlessly, eventually
1716 generating an non existant error.
1717 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1720 if (default_initialize_local_variable)
/* pos is 1-based internally; the callback sees the 0-based user index */
1721 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1723 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1724 /* We don't need to care about exception ops in the start block.
1725 There are none by definition. */
1726 return block->attr.block.graph_arr[pos];
1728 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1729 block->attr.block.graph_arr[pos] = phi0;
1730 #if PRECISE_EXC_CONTEXT
1731 if (get_opt_precise_exc_context()) {
1732 /* Set graph_arr for fragile ops. Also here we should break recursion.
1733 We could choose a cyclic path through an cfop. But the recursion would
1734 break at some point. */
1735 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1741 /* This loop goes to all predecessor blocks of the block the Phi node
1742 is in and there finds the operands of the Phi node by calling
1743 get_r_value_internal. */
1744 for (i = 1; i <= ins; ++i) {
1745 prevCfOp = skip_Proj(block->in[i]);
1747 if (is_Bad(prevCfOp)) {
1748 /* In case a Cond has been optimized we would get right to the start block
1749 with an invalid definition. */
1750 nin[i-1] = new_Bad();
1753 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1755 if (!is_Bad(prevBlock)) {
1756 #if PRECISE_EXC_CONTEXT
1757 if (get_opt_precise_exc_context() &&
1758 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1759 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1760 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1763 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1765 nin[i-1] = new_Bad();
1769 /* We want to pass the Phi0 node to the constructor: this finds additional
1770 optimization possibilities.
1771 The Phi0 node either is allocated in this function, or it comes from
1772 a former call to get_r_value_internal. In this case we may not yet
1773 exchange phi0, as this is done in mature_immBlock. */
1775 phi0_all = block->attr.block.graph_arr[pos];
/* only pass it on if it really is an arity-0 Phi of this block */
1776 if (!((get_irn_op(phi0_all) == op_Phi) &&
1777 (get_irn_arity(phi0_all) == 0) &&
1778 (get_nodes_block(phi0_all) == block)))
1784 /* After collecting all predecessors into the array nin a new Phi node
1785 with these predecessors is created. This constructor contains an
1786 optimization: If all predecessors of the Phi node are identical it
1787 returns the only operand instead of a new Phi node. */
1788 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1790 /* In case we allocated a Phi0 node at the beginning of this procedure,
1791 we need to exchange this Phi0 with the real Phi. */
1793 exchange(phi0, res);
1794 block->attr.block.graph_arr[pos] = res;
1795 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1796 only an optimization. */
1802 /* This function returns the last definition of a variable. In case
1803 this variable was last defined in a previous block, Phi nodes are
1804 inserted. If the part of the firm graph containing the definition
1805 is not yet constructed, a dummy Phi node is returned. */
/* Fast-construction variant: identical to the slow one except case 4,
   where the Phi0 planted by phi_merge guarantees a defined value.
   NOTE(review): return type, `ir_node *res;' and braces elided here. */
1807 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1810 /* There are 4 cases to treat.
1812 1. The block is not mature and we visit it the first time. We can not
1813 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1814 predecessors is returned. This node is added to the linked list (field
1815 "link") of the containing block to be completed when this block is
1816 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1819 2. The value is already known in this block, graph_arr[pos] is set and we
1820 visit the block the first time. We can return the value without
1821 creating any new nodes.
1823 3. The block is mature and we visit it the first time. A Phi node needs
1824 to be created (phi_merge). If the Phi is not needed, as all it's
1825 operands are the same value reaching the block through different
1826 paths, it's optimized away and the value itself is returned.
1828 4. The block is mature, and we visit it the second time. Now two
1829 subcases are possible:
1830 * The value was computed completely the last time we were here. This
1831 is the case if there is no loop. We can return the proper value.
1832 * The recursion that visited this node and set the flag did not
1833 return yet. We are computing a value in a loop and need to
1834 break the recursion. This case only happens if we visited
1835 the same block with phi_merge before, which inserted a Phi0.
1836 So we return the Phi0.
1839 /* case 4 -- already visited. */
1840 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1841 /* As phi_merge allocates a Phi0 this value is always defined. Here
1842 is the critical difference of the two algorithms. */
1843 assert(block->attr.block.graph_arr[pos]);
1844 return block->attr.block.graph_arr[pos];
1847 /* visited the first time */
1848 set_irn_visited(block, get_irg_visited(current_ir_graph));
1850 /* Get the local valid value */
1851 res = block->attr.block.graph_arr[pos];
1853 /* case 2 -- If the value is actually computed, return it. */
1854 if (res) { return res; };
1856 if (block->attr.block.matured) { /* case 3 */
1858 /* The Phi has the same amount of ins as the corresponding block. */
1859 int ins = get_irn_arity(block);
1861 NEW_ARR_A (ir_node *, nin, ins);
1863 /* Phi merge collects the predecessors and then creates a node. */
1864 res = phi_merge (block, pos, mode, nin, ins);
1866 } else { /* case 1 */
1867 /* The block is not mature, we don't know how many in's are needed. A Phi
1868 with zero predecessors is created. Such a Phi node is called Phi0
1869 node. The Phi0 is then added to the list of Phi0 nodes in this block
1870 to be matured by mature_immBlock later.
1871 The Phi0 has to remember the pos of it's internal value. If the real
1872 Phi is computed, pos is used to update the array with the local
1874 res = new_rd_Phi0 (current_ir_graph, block, mode);
1875 res->attr.phi0_pos = pos;
1876 res->link = block->link;
1880 /* If we get here, the frontend missed a use-before-definition error */
1883 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1884 assert (mode->code >= irm_F && mode->code <= irm_P);
1885 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1886 get_mode_null(mode));
1889 /* The local valid value is available now. */
1890 block->attr.block.graph_arr[pos] = res;
1895 #endif /* USE_FAST_PHI_CONSTRUCTION */
1897 /* ************************************************************************** */
1899 /** Finalize a Block node, when all control flows are known. */
1900 /** Acceptable parameters are only Block nodes. */
/* Finalizes an immature block once all control-flow predecessors are
   known: fixes the backedge array, upgrades every queued Phi0 to a real
   Phi via phi_merge, marks the block mature and optimizes it in place.
   NOTE(review): return type, declarations of `ins'/`nin'/`n'/`next' and
   several braces were elided in this extract; code byte-identical. */
1902 mature_immBlock (ir_node *block)
1909 assert (get_irn_opcode(block) == iro_Block);
1910 /* @@@ should be commented in
1911 assert (!get_Block_matured(block) && "Block already matured"); */
1913 if (!get_Block_matured(block)) {
/* in[0] is the block's own entry, hence the -1 */
1914 ins = ARR_LEN (block->in)-1;
1915 /* Fix block parameters */
1916 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1918 /* An array for building the Phi nodes. */
1919 NEW_ARR_A (ir_node *, nin, ins);
1921 /* Traverse a chain of Phi nodes attached to this block and mature
1923 for (n = block->link; n; n=next) {
1924 inc_irg_visited(current_ir_graph);
1926 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1929 block->attr.block.matured = 1;
1931 /* Now, as the block is a finished firm node, we can optimize it.
1932 Since other nodes have been allocated since the block was created
1933 we can not free the node on the obstack. Therefore we have to call
1935 Unfortunately the optimization does not change a lot, as all allocated
1936 nodes refer to the unoptimized node.
1937 We can call _2, as global cse has no effect on blocks. */
1938 block = optimize_in_place_2(block);
1939 IRN_VRFY_IRG(block, current_ir_graph);
/* new_d_* constructors: thin wrappers around the corresponding new_rd_*
   constructors that supply current_ir_graph and its current block (Const
   and SymConst variants use the start block instead).
   NOTE(review): return types, braces and argument-continuation lines were
   elided in this extract; the calls below are fragments of one-statement
   bodies, left byte-identical. */
1944 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1946 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1951 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1953 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1958 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1960 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1966 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1968 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1973 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1975 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* marks the Cond as fragmentary and creates the default Proj for it */
1980 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1983 assert(arg->op == op_Cond);
1984 arg->attr.c.kind = fragmentary;
1985 arg->attr.c.default_proj = max_proj;
1986 res = new_Proj (arg, mode_X, max_proj);
1991 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1993 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1998 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2000 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2004 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2006 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2011 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2013 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2018 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2020 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2026 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2028 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2033 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2035 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2040 * allocate the frag array
/* Lazily attaches a frag_arr to `res' if precise exception context is
   enabled, we are still building, res was not optimized into another op,
   and no frag_arr was set yet (e.g. by CSE).
   NOTE(review): closing braces elided in this extract. */
2042 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2043 if (get_opt_precise_exc_context()) {
2044 if ((current_ir_graph->phase_state == phase_building) &&
2045 (get_irn_op(res) == op) && /* Could be optimized away. */
2046 !*frag_store) /* Could be a cse where the arr is already set. */ {
2047 *frag_store = new_frag_arr(res);
/* More new_d_* wrappers.  The fragile ops (Quot, DivMod, Div, Mod, Call,
   Load, Store, Alloc) additionally call allocate_frag_arr under
   PRECISE_EXC_CONTEXT so exception successors see the right memory state.
   NOTE(review): return types, `ir_node *res;' declarations, argument
   continuations, #endif lines and braces were elided in this extract;
   code below byte-identical. */
2054 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2057 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2059 #if PRECISE_EXC_CONTEXT
2060 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2067 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2070 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2072 #if PRECISE_EXC_CONTEXT
2073 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2080 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2083 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2085 #if PRECISE_EXC_CONTEXT
2086 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2093 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2096 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2098 #if PRECISE_EXC_CONTEXT
2099 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2106 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2108 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2113 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2115 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2120 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2122 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2127 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2129 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2134 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2136 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2141 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2143 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2148 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2150 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2155 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2157 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2162 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2164 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2169 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2171 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2176 new_d_Jmp (dbg_info* db)
2178 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2182 new_d_Cond (dbg_info* db, ir_node *c)
2184 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2188 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2192 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2193 store, callee, arity, in, tp);
2194 #if PRECISE_EXC_CONTEXT
2195 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
2202 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2204 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2209 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2211 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2216 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2219 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2221 #if PRECISE_EXC_CONTEXT
2222 allocate_frag_arr(res, op_Load, &res->attr.load.frag_arr); /* Could be optimized away. */
2229 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2232 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2234 #if PRECISE_EXC_CONTEXT
2235 allocate_frag_arr(res, op_Store, &res->attr.store.frag_arr); /* Could be optimized away. */
2242 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2246 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2247 store, size, alloc_type, where);
2248 #if PRECISE_EXC_CONTEXT
2249 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2256 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2258 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2259 store, ptr, size, free_type);
2263 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2264 /* GL: objptr was called frame before. Frame was a bad choice for the name
2265 as the operand could as well be a pointer to a dynamic object. */
2267 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2268 store, objptr, 0, NULL, ent);
2272 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2274 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2275 store, objptr, n_index, index, sel);
2279 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2281 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2282 store, objptr, ent));
2286 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2288 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2293 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2295 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2300 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2302 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2310 return __new_d_Bad();
2314 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2316 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2321 new_d_Unknown (ir_mode *m)
2323 return new_rd_Unknown(current_ir_graph, m);
2327 new_d_CallBegin (dbg_info *db, ir_node *call)
2330 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2335 new_d_EndReg (dbg_info *db)
2338 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2343 new_d_EndExcept (dbg_info *db)
2346 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2351 new_d_Break (dbg_info *db)
2353 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2357 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2359 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2364 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2368 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2369 callee, arity, in, tp)
2374 /* ********************************************************************* */
2375 /* Comfortable interface with automatic Phi node construction. */
2376 /* (Uses also constructors of ?? interface, except new_Block. */
2377 /* ********************************************************************* */
2379 /* * Block construction **/
2380 /* immature Block without predecessors */
/* Creates an immature block (dynamic predecessor array, arity -1), makes
   it the current block and allocates a zeroed graph_arr of n_loc slots
   for the automatic Phi construction.
   NOTE(review): `ir_node *res;' and the return were elided in this
   extract; code below byte-identical. */
2381 ir_node *new_d_immBlock (dbg_info* db) {
2384 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2385 /* creates a new dynamic in-array as length of in is -1 */
2386 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2387 current_ir_graph->current_block = res;
2388 res->attr.block.matured = 0;
2389 /* res->attr.block.exc = exc_normal; */
2390 /* res->attr.block.handler_entry = 0; */
2391 res->attr.block.irg = current_ir_graph;
2392 res->attr.block.backedge = NULL;
2393 res->attr.block.in_cg = NULL;
2394 res->attr.block.cg_backedge = NULL;
2395 set_Block_block_visited(res, 0);
2397 /* Create and initialize array for Phi-node construction. */
2398 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2399 current_ir_graph->n_loc);
2400 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2402 /* Immature block may not be optimized! */
2403 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper without debug info. */
2409 new_immBlock (void) {
2410 return new_d_immBlock(NULL);
2413 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor `jmp' to an immature block.  Asserts
   (and refuses) if the block was already matured. */
2415 add_immBlock_pred (ir_node *block, ir_node *jmp)
2417 if (block->attr.block.matured) {
2418 assert(0 && "Error: Block already matured!\n");
2421 assert(jmp != NULL);
2422 ARR_APP1(ir_node *, block->in, jmp);
2426 /* changing the current block */
/* Makes `target' the block new nodes are constructed in. */
2428 set_cur_block (ir_node *target)
2430 current_ir_graph->current_block = target;
2433 /* ************************ */
2434 /* parameter administration */
2436 /* get a value from the parameter array from the current block by its index */
2438 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2440 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2441 inc_irg_visited(current_ir_graph);
2443 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2445 /* get a value from the parameter array from the current block by its index */
2447 get_value (int pos, ir_mode *mode)
2449 return get_d_value(NULL, pos, mode);
2452 /* set a value at position pos in the parameter array from the current block */
2454 set_value (int pos, ir_node *value)
2456 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2457 assert(pos+1 < current_ir_graph->n_loc);
2458 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2461 /* get the current store */
2465 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2466 /* GL: one could call get_value instead */
2467 inc_irg_visited(current_ir_graph);
2468 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2471 /* set the current store */
2473 set_store (ir_node *store)
2475 /* GL: one could call set_value instead */
2476 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2477 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2481 keep_alive (ir_node *ka)
2483 add_End_keepalive(current_ir_graph->end, ka);
2486 /** Useful access routines **/
2487 /* Returns the current block of the current graph. To set the current
2488 block use set_cur_block. */
2489 ir_node *get_cur_block() {
2490 return get_irg_current_block(current_ir_graph);
2493 /* Returns the frame type of the current graph */
2494 type *get_cur_frame_type() {
2495 return get_irg_frame_type(current_ir_graph);
2499 /* ********************************************************************* */
2502 /* call once for each run of the library */
2504 init_cons (default_initialize_local_variable_func_t *func)
2506 default_initialize_local_variable = func;
2509 /* call for each graph */
2511 finalize_cons (ir_graph *irg) {
2512 irg->phase_state = phase_high;
2516 ir_node *new_Block(int arity, ir_node **in) {
2517 return new_d_Block(NULL, arity, in);
2519 ir_node *new_Start (void) {
2520 return new_d_Start(NULL);
2522 ir_node *new_End (void) {
2523 return new_d_End(NULL);
2525 ir_node *new_Jmp (void) {
2526 return new_d_Jmp(NULL);
2528 ir_node *new_Cond (ir_node *c) {
2529 return new_d_Cond(NULL, c);
2531 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2532 return new_d_Return(NULL, store, arity, in);
2534 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2535 return new_d_Raise(NULL, store, obj);
2537 ir_node *new_Const (ir_mode *mode, tarval *con) {
2538 return new_d_Const(NULL, mode, con);
2540 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2541 return new_d_SymConst(NULL, value, kind);
2543 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2544 return new_d_simpleSel(NULL, store, objptr, ent);
2546 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2548 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2550 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2551 return new_d_InstOf (NULL, store, objptr, ent);
2553 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2555 return new_d_Call(NULL, store, callee, arity, in, tp);
2557 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2558 return new_d_Add(NULL, op1, op2, mode);
2560 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2561 return new_d_Sub(NULL, op1, op2, mode);
2563 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2564 return new_d_Minus(NULL, op, mode);
2566 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2567 return new_d_Mul(NULL, op1, op2, mode);
2569 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2570 return new_d_Quot(NULL, memop, op1, op2);
2572 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2573 return new_d_DivMod(NULL, memop, op1, op2);
2575 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2576 return new_d_Div(NULL, memop, op1, op2);
2578 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2579 return new_d_Mod(NULL, memop, op1, op2);
2581 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2582 return new_d_Abs(NULL, op, mode);
2584 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2585 return new_d_And(NULL, op1, op2, mode);
2587 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2588 return new_d_Or(NULL, op1, op2, mode);
2590 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2591 return new_d_Eor(NULL, op1, op2, mode);
2593 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2594 return new_d_Not(NULL, op, mode);
2596 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2597 return new_d_Shl(NULL, op, k, mode);
2599 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2600 return new_d_Shr(NULL, op, k, mode);
2602 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2603 return new_d_Shrs(NULL, op, k, mode);
2605 #define new_Rotate new_Rot
2606 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2607 return new_d_Rot(NULL, op, k, mode);
2609 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2610 return new_d_Cmp(NULL, op1, op2);
2612 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2613 return new_d_Conv(NULL, op, mode);
2615 ir_node *new_Cast (ir_node *op, type *to_tp) {
2616 return new_d_Cast(NULL, op, to_tp);
2618 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2619 return new_d_Phi(NULL, arity, in, mode);
2621 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2622 return new_d_Load(NULL, store, addr, mode);
2624 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2625 return new_d_Store(NULL, store, addr, val);
2627 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2628 where_alloc where) {
2629 return new_d_Alloc(NULL, store, size, alloc_type, where);
2631 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2633 return new_d_Free(NULL, store, ptr, size, free_type);
2635 ir_node *new_Sync (int arity, ir_node **in) {
2636 return new_d_Sync(NULL, arity, in);
2638 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2639 return new_d_Proj(NULL, arg, mode, proj);
2641 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2642 return new_d_defaultProj(NULL, arg, max_proj);
2644 ir_node *new_Tuple (int arity, ir_node **in) {
2645 return new_d_Tuple(NULL, arity, in);
2647 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2648 return new_d_Id(NULL, val, mode);
2650 ir_node *new_Bad (void) {
2653 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2654 return new_d_Confirm (NULL, val, bound, cmp);
2656 ir_node *new_Unknown(ir_mode *m) {
2657 return new_d_Unknown(m);
2659 ir_node *new_CallBegin (ir_node *callee) {
2660 return new_d_CallBegin(NULL, callee);
2662 ir_node *new_EndReg (void) {
2663 return new_d_EndReg(NULL);
2665 ir_node *new_EndExcept (void) {
2666 return new_d_EndExcept(NULL);
2668 ir_node *new_Break (void) {
2669 return new_d_Break(NULL);
2671 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2672 return new_d_Filter(NULL, arg, mode, proj);
2674 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2675 return new_d_FuncCall(NULL, callee, arity, in, tp);