3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependent initialization variable
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** private interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
/* Raw Block constructor: the node itself has no enclosing block (NULL is
   passed as the block argument).  Because all predecessors are supplied up
   front, the block is marked matured immediately. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
/* Loop-backedge bookkeeping is sized to the predecessor count. */
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* in_cg/cg_backedge: presumably the inter-procedural (call-graph) view,
   left empty here -- TODO confirm against irgraph_t.h. */
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
/* Start node: zero predecessors, produces a mode_T tuple. */
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 requests a dynamic in-array; predecessors (e.g.
   keep-alive edges, see add_End_keepalive uses below) are added later.
   Note: End is not passed through optimize_node here. */
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its arity must match the Phi's arity. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan for Unknown operands: an Unknown predecessor suppresses the
   optimize_node call below, the Phi must survive until it is resolved. */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* Const with an explicit type.  NOTE: the 'block' parameter is ignored --
   constants are always placed in the graph's start block (see the
   irg->start_block argument below). */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
/* optimize_node may return an existing (CSE'd) Const; it must carry the
   same type. */
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const with unknown_type. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Id node: a transparent copy of 'val'; normally folded away by
   optimize_node. */
162 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
166 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
167 res = optimize_node(res);
168 IRN_VRFY_IRG(res, irg);
/* Proj: selects component 'proj' from a mode_T predecessor. */
173 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
178 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
179 res->attr.proj = proj;
182 assert(get_Proj_pred(res));
183 assert(get_nodes_block(get_Proj_pred(res)));
185 res = optimize_node(res);
187 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond (the "otherwise" branch).  Side effect: this
   MUTATES the Cond argument -- it is marked fragmentary and its
   default_proj is set to max_proj. */
193 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
197 assert(arg->op == op_Cond);
198 arg->attr.c.kind = fragmentary;
199 arg->attr.c.default_proj = max_proj;
200 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: value conversion of 'op' to 'mode'. */
205 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
210 res = optimize_node(res);
211 IRN_VRFY_IRG(res, irg);
/* Cast: changes only the high-level type; the mode is taken unchanged
   from the operand. */
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
220 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
221 res->attr.cast.totype = to_tp;
222 res = optimize_node(res);
223 IRN_VRFY_IRG(res, irg);
/* Tuple: groups 'arity' values into one mode_T node. */
228 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
232 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
233 res = optimize_node (res);
234 IRN_VRFY_IRG(res, irg);
/* --- Arithmetic, logic, shift and compare constructors. ---------------
   All follow the same pattern: build the node, run local optimization
   (constant folding / CSE via optimize_node), then verify.
   Binary ops take an explicit result mode; Quot/DivMod/Div/Mod carry a
   memory operand and produce a mode_T tuple; Cmp also produces mode_T
   (one Proj per relation). */
239 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
240 ir_node *op1, ir_node *op2, ir_mode *mode)
247 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
248 res = optimize_node(res);
249 IRN_VRFY_IRG(res, irg);
254 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
255 ir_node *op1, ir_node *op2, ir_mode *mode)
262 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
263 res = optimize_node (res);
264 IRN_VRFY_IRG(res, irg);
269 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
270 ir_node *op, ir_mode *mode)
274 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
275 res = optimize_node(res);
276 IRN_VRFY_IRG(res, irg);
281 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
282 ir_node *op1, ir_node *op2, ir_mode *mode)
289 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
290 res = optimize_node(res);
291 IRN_VRFY_IRG(res, irg);
/* Division family: first operand is the memory state (may raise). */
296 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
297 ir_node *memop, ir_node *op1, ir_node *op2)
305 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
306 res = optimize_node(res);
307 IRN_VRFY_IRG(res, irg);
312 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
313 ir_node *memop, ir_node *op1, ir_node *op2)
321 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
322 res = optimize_node(res);
323 IRN_VRFY_IRG(res, irg);
328 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *memop, ir_node *op1, ir_node *op2)
337 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
338 res = optimize_node(res);
339 IRN_VRFY_IRG(res, irg);
344 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
345 ir_node *memop, ir_node *op1, ir_node *op2)
353 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
354 res = optimize_node(res);
355 IRN_VRFY_IRG(res, irg);
360 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
368 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
369 res = optimize_node(res);
370 IRN_VRFY_IRG(res, irg);
375 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
376 ir_node *op1, ir_node *op2, ir_mode *mode)
383 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
384 res = optimize_node(res);
385 IRN_VRFY_IRG(res, irg);
390 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
391 ir_node *op1, ir_node *op2, ir_mode *mode)
398 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
399 res = optimize_node (res);
400 IRN_VRFY_IRG(res, irg);
405 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
406 ir_node *op, ir_mode *mode)
410 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
/* Shifts/rotate: 'k' is the shift amount. */
417 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_node *k, ir_mode *mode)
425 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
433 ir_node *op, ir_node *k, ir_mode *mode)
440 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
447 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
448 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
470 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
478 ir_node *op, ir_mode *mode)
482 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
483 res = optimize_node (res);
484 IRN_VRFY_IRG(res, irg);
489 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *op1, ir_node *op2)
497 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
498 res = optimize_node(res);
499 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow, no operands. */
504 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
508 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
509 res = optimize_node (res);
510 IRN_VRFY_IRG (res, irg);
/* Cond: multiway branch on 'c'; starts as a dense jump table with
   default projection 0 (new_rd_defaultProj may later switch it to
   fragmentary). */
515 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
519 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
520 res->attr.c.kind = dense;
521 res->attr.c.default_proj = 0;
522 res = optimize_node (res);
523 IRN_VRFY_IRG(res, irg);
/* Call: real in-array layout is [store, callee, arg0..argN-1] -- the
   argument copy starts at r_in[2].  The call is pinned (may raise). */
528 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
529 ir_node *callee, int arity, ir_node **in, type *tp)
536 NEW_ARR_A(ir_node *, r_in, r_arity);
539 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
541 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
543 assert(is_method_type(tp));
544 set_Call_type(res, tp);
545 res->attr.call.exc.pin_state = op_pin_state_pinned;
/* callee_arr caches possible callees for analyses; unknown at build time. */
546 res->attr.call.callee_arr = NULL;
547 res = optimize_node(res);
548 IRN_VRFY_IRG(res, irg);
/* Return: in-array layout is [store, result0..resultN-1]. */
553 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
554 ir_node *store, int arity, ir_node **in)
561 NEW_ARR_A (ir_node *, r_in, r_arity);
563 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
564 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
565 res = optimize_node(res);
566 IRN_VRFY_IRG(res, irg);
/* Raise: throws 'obj'; operands are [store, obj]. */
571 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
578 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
579 res = optimize_node(res);
580 IRN_VRFY_IRG(res, irg);
/* Load: operands [store, adr]; result is a mode_T tuple.  The value mode
   is recorded in the attribute; defaults: pinned (may raise),
   non-volatile. */
585 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
586 ir_node *store, ir_node *adr, ir_mode *mode)
593 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
594 res->attr.load.exc.pin_state = op_pin_state_pinned;
595 res->attr.load.load_mode = mode;
596 res->attr.load.volatility = volatility_non_volatile;
597 res = optimize_node(res);
598 IRN_VRFY_IRG(res, irg);
/* Store: operands [store, adr, val]; same pinned/non-volatile defaults. */
603 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
604 ir_node *store, ir_node *adr, ir_node *val)
612 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
613 res->attr.store.exc.pin_state = op_pin_state_pinned;
614 res->attr.store.volatility = volatility_non_volatile;
615 res = optimize_node(res);
616 IRN_VRFY_IRG(res, irg);
/* Alloc: allocates 'size' bytes of 'alloc_type' on stack or heap
   ('where'); operands [store, size]. */
621 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
622 ir_node *size, type *alloc_type, where_alloc where)
629 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
630 res->attr.a.exc.pin_state = op_pin_state_pinned;
631 res->attr.a.where = where;
632 res->attr.a.type = alloc_type;
633 res = optimize_node(res);
634 IRN_VRFY_IRG(res, irg);
/* Free: releases 'size' bytes at 'ptr'; operands [store, ptr, size]. */
639 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
640 ir_node *ptr, ir_node *size, type *free_type)
648 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
649 res->attr.f = free_type;
650 res = optimize_node(res);
651 IRN_VRFY_IRG(res, irg);
/* Sel: selects entity 'ent' relative to 'objptr'; real in-array layout is
   [store, objptr, index0..indexN-1] (copy starts at r_in[2]).  Result is
   a machine pointer (mode_P_mach). */
656 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
657 int arity, ir_node **in, entity *ent)
663 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
666 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
669 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
670 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
671 res->attr.s.ent = ent;
672 res = optimize_node(res);
673 IRN_VRFY_IRG(res, irg);
/* InstOf: runtime type test.
   NOTE(review): the node is built with op_Sel here although it stores
   into attr.io -- looks like a copy/paste from new_rd_Sel; probably
   should be op_InstOf.  Verify against irop.h before changing. */
678 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
679 ir_node *objptr, type *ent)
686 NEW_ARR_A(ir_node *, r_in, r_arity);
690 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
691 res->attr.io.ent = ent;
693 /* res = optimize(res); */
694 IRN_VRFY_IRG(res, irg);
/* SymConst with explicit type.  The result mode depends on the kind:
   address kinds (addr_name/addr_ent) presumably get a pointer mode, the
   others an integer mode -- the mode-selection lines are not visible
   here, TODO confirm. */
699 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
700 symconst_kind symkind, type *tp)
705 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
709 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
711 res->attr.i.num = symkind;
712 res->attr.i.sym = value;
715 res = optimize_node(res);
716 IRN_VRFY_IRG(res, irg);
/* SymConst without a type: uses unknown_type. */
721 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
722 symconst_kind symkind)
724 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
/* Kind-specific wrappers; all place the node in the start block.  The
   (type *) casts only satisfy the symconst_symbol union initializer. */
728 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
729 symconst_symbol sym = {(type *)symbol};
730 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
733 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
734 symconst_symbol sym = {(type *)symbol};
735 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
738 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
739 symconst_symbol sym = {symbol};
740 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
743 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
744 symconst_symbol sym = {symbol};
745 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Sync: joins several memory states into one (mode_M). */
749 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
753 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
/* Bad: the graph's singleton error node (body not visible here). */
760 new_rd_Bad (ir_graph *irg)
/* Confirm: asserts 'val cmp bound' holds; carries the operand's mode. */
766 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
768 ir_node *in[2], *res;
772 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
773 res->attr.confirm_cmp = cmp;
774 res = optimize_node (res);
775 IRN_VRFY_IRG(res, irg);
/* Unknown: placeholder value in mode 'm', placed in the start block and
   deliberately NOT optimized. */
780 new_rd_Unknown (ir_graph *irg, ir_mode *m)
782 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* CallBegin (inter-procedural view): its single operand is the callee
   pointer of the corresponding Call. */
786 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
791 in[0] = get_Call_ptr(call);
792 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
793 /* res->attr.callbegin.irg = irg; */
794 res->attr.callbegin.call = call;
795 res = optimize_node(res);
796 IRN_VRFY_IRG(res, irg);
/* EndReg/EndExcept (inter-procedural view): like End, arity -1 requests
   a dynamic in-array; neither is run through optimize_node. */
801 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
805 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
807 IRN_VRFY_IRG(res, irg);
812 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
816 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
/* The graph remembers its exceptional end node. */
817 irg->end_except = res;
818 IRN_VRFY_IRG (res, irg);
/* Break: control flow leaving an inter-procedural region. */
823 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
827 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
828 res = optimize_node(res);
829 IRN_VRFY_IRG(res, irg);
/* Filter: inter-procedural counterpart of Proj; in_cg/backedge info is
   filled in later. */
834 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
839 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
840 res->attr.filter.proj = proj;
841 res->attr.filter.in_cg = NULL;
842 res->attr.filter.backedge = NULL;
845 assert(get_Proj_pred(res));
846 assert(get_nodes_block(get_Proj_pred(res)));
848 res = optimize_node(res);
849 IRN_VRFY_IRG(res, irg);
/* FuncCall: call without a memory operand (pure function); real in-array
   layout is [callee, arg0..argN-1] (copy starts at r_in[1]). */
855 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
856 ir_node *callee, int arity, ir_node **in, type *tp)
863 NEW_ARR_A(ir_node *, r_in, r_arity);
865 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
867 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
869 assert(is_method_type(tp));
870 set_FuncCall_type(res, tp);
871 res->attr.call.callee_arr = NULL;
872 res = optimize_node(res);
873 IRN_VRFY_IRG(res, irg);
/* --- new_r_* convenience wrappers. ------------------------------------
   Each simply forwards to the corresponding new_rd_* constructor with a
   NULL dbg_info.  No other logic. */
878 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
879 return new_rd_Block(NULL, irg, arity, in);
881 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
882 return new_rd_Start(NULL, irg, block);
884 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
885 return new_rd_End(NULL, irg, block);
887 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
888 return new_rd_Jmp(NULL, irg, block);
890 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
891 return new_rd_Cond(NULL, irg, block, c);
893 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
894 ir_node *store, int arity, ir_node **in) {
895 return new_rd_Return(NULL, irg, block, store, arity, in);
897 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
898 ir_node *store, ir_node *obj) {
899 return new_rd_Raise(NULL, irg, block, store, obj);
901 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
902 ir_mode *mode, tarval *con) {
903 return new_rd_Const(NULL, irg, block, mode, con);
905 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
906 symconst_symbol value, symconst_kind symkind) {
907 return new_rd_SymConst(NULL, irg, block, value, symkind);
909 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
910 ir_node *objptr, int n_index, ir_node **index,
912 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
914 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
916 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
918 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
919 ir_node *callee, int arity, ir_node **in,
921 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
923 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
924 ir_node *op1, ir_node *op2, ir_mode *mode) {
925 return new_rd_Add(NULL, irg, block, op1, op2, mode);
927 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
928 ir_node *op1, ir_node *op2, ir_mode *mode) {
929 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
931 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
932 ir_node *op, ir_mode *mode) {
933 return new_rd_Minus(NULL, irg, block, op, mode);
935 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
936 ir_node *op1, ir_node *op2, ir_mode *mode) {
937 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
939 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
940 ir_node *memop, ir_node *op1, ir_node *op2) {
941 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
943 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
944 ir_node *memop, ir_node *op1, ir_node *op2) {
945 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
947 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
948 ir_node *memop, ir_node *op1, ir_node *op2) {
949 return new_rd_Div(NULL, irg, block, memop, op1, op2);
951 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
952 ir_node *memop, ir_node *op1, ir_node *op2) {
953 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
955 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
956 ir_node *op, ir_mode *mode) {
957 return new_rd_Abs(NULL, irg, block, op, mode);
959 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
960 ir_node *op1, ir_node *op2, ir_mode *mode) {
961 return new_rd_And(NULL, irg, block, op1, op2, mode);
963 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
964 ir_node *op1, ir_node *op2, ir_mode *mode) {
965 return new_rd_Or(NULL, irg, block, op1, op2, mode);
967 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
968 ir_node *op1, ir_node *op2, ir_mode *mode) {
969 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
971 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
972 ir_node *op, ir_mode *mode) {
973 return new_rd_Not(NULL, irg, block, op, mode);
975 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
976 ir_node *op1, ir_node *op2) {
977 return new_rd_Cmp(NULL, irg, block, op1, op2);
979 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
980 ir_node *op, ir_node *k, ir_mode *mode) {
981 return new_rd_Shl(NULL, irg, block, op, k, mode);
983 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
984 ir_node *op, ir_node *k, ir_mode *mode) {
985 return new_rd_Shr(NULL, irg, block, op, k, mode);
987 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
988 ir_node *op, ir_node *k, ir_mode *mode) {
989 return new_rd_Shrs(NULL, irg, block, op, k, mode);
991 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
992 ir_node *op, ir_node *k, ir_mode *mode) {
993 return new_rd_Rot(NULL, irg, block, op, k, mode);
995 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
996 ir_node *op, ir_mode *mode) {
997 return new_rd_Conv(NULL, irg, block, op, mode);
999 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1000 return new_rd_Cast(NULL, irg, block, op, to_tp);
1002 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1003 ir_node **in, ir_mode *mode) {
1004 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1006 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1007 ir_node *store, ir_node *adr, ir_mode *mode) {
1008 return new_rd_Load(NULL, irg, block, store, adr, mode);
1010 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1011 ir_node *store, ir_node *adr, ir_node *val) {
1012 return new_rd_Store(NULL, irg, block, store, adr, val);
1014 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1015 ir_node *size, type *alloc_type, where_alloc where) {
1016 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1018 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1019 ir_node *ptr, ir_node *size, type *free_type) {
1020 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1022 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1023 return new_rd_Sync(NULL, irg, block, arity, in);
1025 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1026 ir_mode *mode, long proj) {
1027 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1029 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1031 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1033 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1034 int arity, ir_node **in) {
1035 return new_rd_Tuple(NULL, irg, block, arity, in );
1037 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1038 ir_node *val, ir_mode *mode) {
1039 return new_rd_Id(NULL, irg, block, val, mode);
1041 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1042 return new_rd_Bad(irg);
1044 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1045 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1047 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1048 return new_rd_Unknown(irg, m);
1050 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1051 return new_rd_CallBegin(NULL, irg, block, callee);
1053 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1054 return new_rd_EndReg(NULL, irg, block);
1056 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1057 return new_rd_EndExcept(NULL, irg, block);
1059 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1060 return new_rd_Break(NULL, irg, block);
1062 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1063 ir_mode *mode, long proj) {
1064 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1066 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1067 ir_node *callee, int arity, ir_node **in,
1069 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1073 /** ********************/
1074 /** public interfaces */
1075 /** construction tools */
1079 * - create a new Start node in the current block
1081 * @return s - pointer to the created Start node
/* new_d_* constructors operate on the global current_ir_graph and its
   current_block instead of taking graph/block parameters. */
1086 new_d_Start (dbg_info* db)
1090 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1091 op_Start, mode_T, 0, NULL);
1092 /* res->attr.start.irg = current_ir_graph; */
1094 res = optimize_node(res);
1095 IRN_VRFY_IRG(res, current_ir_graph);
/* End with dynamic in-array (arity -1), built in the current block. */
1100 new_d_End (dbg_info* db)
1103 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1104 op_End, mode_X, -1, NULL);
1105 res = optimize_node(res);
1106 IRN_VRFY_IRG(res, current_ir_graph);
1111 /* Constructs a Block with a fixed number of predecessors.
1112 Does set current_block. Can be used with automatic Phi
1113 node construction. */
1115 new_d_Block (dbg_info* db, int arity, ir_node **in)
1119 bool has_unknown = false;
1121 res = new_rd_Block(db, current_ir_graph, arity, in);
1123 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local variable (n_loc) and is the
   per-block value table used by get_value/set_value; zeroed here. */
1124 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1125 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1126 current_ir_graph->n_loc);
1127 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* As in new_rd_Phi: an Unknown predecessor suppresses optimization. */
1130 for (i = arity-1; i >= 0; i--)
1131 if (get_irn_op(in[i]) == op_Unknown) {
1136 if (!has_unknown) res = optimize_node(res);
/* Subsequent new_d_* constructors will build into this block. */
1137 current_ir_graph->current_block = res;
1139 IRN_VRFY_IRG(res, current_ir_graph);
1144 /* ***********************************************************************/
1145 /* Methods necessary for automatic Phi node creation */
1147 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1148 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1149 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1150 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1152 Call Graph: ( A ---> B == A "calls" B)
1154 get_value mature_immBlock
1162 get_r_value_internal |
1166 new_rd_Phi0 new_rd_Phi_in
1168 * *************************************************************************** */
1170 /** Creates a Phi node with 0 predecessors */
/* Phi0 is a placeholder for a value in an immature block; it is neither
   optimized nor given backedge info -- mature_immBlock upgrades it. */
1171 static INLINE ir_node *
1172 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1176 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1177 IRN_VRFY_IRG(res, irg);
1181 /* There are two implementations of the Phi node construction. The first
1182 is faster, but does not work for blocks with more than 2 predecessors.
1183 The second works always but is slower and causes more unnecessary Phi
1185 Select the implementations by the following preprocessor flag set in
1187 #if USE_FAST_PHI_CONSTRUCTION
1189 /* This is a stack used for allocating and deallocating nodes in
1190 new_rd_Phi_in. The original implementation used the obstack
1191 to model this stack, now it is explicit. This reduces side effects.
1193 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate a fresh, empty Phi recycling stack (flexible array). */
1194 INLINE Phi_in_stack *
1195 new_Phi_in_stack(void) {
1198 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1200 res->stack = NEW_ARR_F (ir_node *, 0);
/* Release the stack's array (and, presumably, the struct -- the free()
   line is not visible here). */
1207 free_Phi_in_stack(Phi_in_stack *s) {
1208 DEL_ARR_F(s->stack);
/* Push a discarded Phi node onto the recycling stack for later reuse. */
1212 free_to_Phi_in_stack(ir_node *phi) {
1213 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1214 current_ir_graph->Phi_in_stack->pos)
1215 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1217 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1219 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a recycled Phi if available, else allocate a new one. */
1222 static INLINE ir_node *
1223 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1224 int arity, ir_node **in) {
1226 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1227 int pos = current_ir_graph->Phi_in_stack->pos;
1231 /* We need to allocate a new node */
/* NOTE(review): 'db' is not among this function's visible parameters or
   locals -- likely should be NULL (debug info).  Verify; this branch is
   only compiled under USE_EXPLICIT_PHI_IN_STACK. */
1232 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1233 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1235 /* reuse the old node and initialize it again. */
1238 assert (res->kind == k_ir_node);
1239 assert (res->op == op_Phi);
1243 assert (arity >= 0);
1244 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer; operands start at in[1]. */
1245 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1247 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1249 (current_ir_graph->Phi_in_stack->pos)--;
1253 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1255 /* Creates a Phi node with a given, fixed array **in of predecessors.
1256 If the Phi node is unnecessary, as the same value reaches the block
1257 through all control flow paths, it is eliminated and the value
1258 returned directly. This constructor is only intended for use in
1259 the automatic Phi node generation triggered by get_value or mature.
1260 The implementation is quite tricky and depends on the fact, that
1261 the nodes are allocated on a stack:
1262 The in array contains predecessors and NULLs. The NULLs appear,
1263 if get_r_value_internal, that computed the predecessors, reached
1264 the same block on two paths. In this case the same value reaches
1265 this block on both paths, there is no definition in between. We need
1266 not allocate a Phi where these path's merge, but we have to communicate
1267 this fact to the caller. This happens by returning a pointer to the
1268 node the caller _will_ allocate. (Yes, we predict the address. We can
1269 do so because the nodes are allocated on the obstack.) The caller then
1270 finds a pointer to itself and, when this routine is called again,
1273 static INLINE ir_node *
1274 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1277 ir_node *res, *known;
1279 /* Allocate a new node on the obstack. This can return a node to
1280 which some of the pointers in the in-array already point.
1281 Attention: the constructor copies the in array, i.e., the later
1282 changes to the array in this routine do not affect the
1283 constructed node! If the in array contains NULLs, there will be
1284 missing predecessors in the returned node. Is this a possible
1285 internal state of the Phi node generation? */
1286 #if USE_EXPLICIT_PHI_IN_STACK
1287 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1289 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1290 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1293 /* The in-array can contain NULLs. These were returned by
1294 get_r_value_internal if it reached the same block/definition on a
1295 second path. The NULLs are replaced by the node itself to
1296 simplify the test in the next loop. */
1297 for (i = 0; i < ins; ++i) {
1302 /* This loop checks whether the Phi has more than one predecessor.
1303 If so, it is a real Phi node and we break the loop. Else the Phi
1304 node merges the same definition on several paths and therefore is
1306 for (i = 0; i < ins; ++i)
/* Self-references and references to the predicted node don't count
   as distinct predecessors. */
1308 if (in[i] == res || in[i] == known) continue;
1316 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the speculative Phi: recycle it via the explicit stack,
   or pop it off the obstack (it is the topmost allocation). */
1318 #if USE_EXPLICIT_PHI_IN_STACK
1319 free_to_Phi_in_stack(res);
1321 obstack_free (current_ir_graph->obst, res);
1325 res = optimize_node (res);
1326 IRN_VRFY_IRG(res, irg);
1329 /* return the pointer to the Phi node. This node might be deallocated! */
1334 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1337 allocates and returns this node. The routine called to allocate the
1338 node might optimize it away and return a real value, or even a pointer
1339 to a deallocated Phi node on top of the obstack!
1340 This function is called with an in-array of proper size. **/
/* Collects the operands for a Phi at position `pos` in `block` by walking
   all control-flow predecessors (block->in[1..ins]) and recursing via
   get_r_value_internal, then builds the Phi with new_rd_Phi_in.
   `nin` is a caller-provided scratch array of size `ins`. */
1342 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1344   ir_node *prevBlock, *res;
1347   /* This loop goes to all predecessor blocks of the block the Phi node is in
1348      and there finds the operands of the Phi node by calling
1349      get_r_value_internal. */
1350   for (i = 1;  i <= ins;  ++i) {
1351     assert (block->in[i]);
1352     prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1354     nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1357   /* After collecting all predecessors into the array nin a new Phi node
1358      with these predecessors is created.  This constructor contains an
1359      optimization: If all predecessors of the Phi node are identical it
1360      returns the only operand instead of a new Phi node.  If the value
1361      passes two different control flow edges without being defined, and
1362      this is the second path treated, a pointer to the node that will be
1363      allocated for the first path (recursion) is returned.  We already
1364      know the address of this node, as it is the next node to be allocated
1365      and will be placed on top of the obstack. (The obstack is a _stack_!) */
1366   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1368   /* Now we know the value for "pos" and can enter it in the array with
1369      all known local variables.  Attention: this might be a pointer to
1370      a node, that later will be allocated!!! See new_rd_Phi_in.
1371      If this is called in mature, after some set_value in the same block,
1372      the proper value must not be overwritten:
1374      get_value    (makes Phi0, puts it into graph_arr)
1375      set_value    (overwrites Phi0 in graph_arr)
1376      mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
     Only record the result if no later definition has been stored yet. */
1379   if (!block->attr.block.graph_arr[pos]) {
1380     block->attr.block.graph_arr[pos] = res;
1382     /*  printf(" value already computed by %s\n",
1383         get_id_str(block->attr.block.graph_arr[pos]->op->name));  */
1389 /* This function returns the last definition of a variable.  In case
1390    this variable was last defined in a previous block, Phi nodes are
1391    inserted.  If the part of the firm graph containing the definition
1392    is not yet constructed, a dummy Phi node is returned. */
1394 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1397   /* There are 4 cases to treat.
1399      1. The block is not mature and we visit it the first time.  We can not
1400         create a proper Phi node, therefore a Phi0, i.e., a Phi without
1401         predecessors is returned.  This node is added to the linked list (field
1402         "link") of the containing block to be completed when this block is
1403         matured. (Completion will add a new Phi and turn the Phi0 into an Id
1406      2. The value is already known in this block, graph_arr[pos] is set and we
1407         visit the block the first time.  We can return the value without
1408         creating any new nodes.
1410      3. The block is mature and we visit it the first time.  A Phi node needs
1411         to be created (phi_merge).  If the Phi is not needed, as all its
1412         operands are the same value reaching the block through different
1413         paths, it's optimized away and the value itself is returned.
1415      4. The block is mature, and we visit it the second time.  Now two
1416         subcases are possible:
1417         * The value was computed completely the last time we were here.  This
1418           is the case if there is no loop.  We can return the proper value.
1419         * The recursion that visited this node and set the flag did not
1420           return yet.  We are computing a value in a loop and need to
1421           break the recursion without knowing the result yet.
1422         @@@ strange case.  Straight forward we would create a Phi before
1423         starting the computation of its predecessors.  In this case we will
1424         find a Phi here in any case.  The problem is that this implementation
1425         only creates a Phi after computing the predecessors, so that it is
1426         hard to compute self references of this Phi.  @@@
1427         There is no simple check for the second subcase.  Therefore we check
1428         for a second visit and treat all such cases as the second subcase.
1429         Anyways, the basic situation is the same:  we reached a block
1430         on two paths without finding a definition of the value:  No Phi
1431         nodes are needed on both paths.
1432         We return this information "Two paths, no Phi needed" by a very tricky
1433         implementation that relies on the fact that an obstack is a stack and
1434         will return a node with the same address on different allocations.
1435         Look also at phi_merge and new_rd_phi_in to understand this.
1436     @@@ Unfortunately this does not work, see testprogram
1437     three_cfpred_example.
1441   /* case 4 -- already visited. */
1442   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1444   /* visited the first time */
1445   set_irn_visited(block, get_irg_visited(current_ir_graph));
1447   /* Get the local valid value */
1448   res = block->attr.block.graph_arr[pos];
1450   /* case 2 -- If the value is actually computed, return it. */
1451   if (res) return res;
1453   if (block->attr.block.matured) { /* case 3 */
1455     /* The Phi has the same amount of ins as the corresponding block. */
1456     int ins = get_irn_arity(block);
1458     NEW_ARR_A (ir_node *, nin, ins);
1460     /* Phi merge collects the predecessors and then creates a node. */
1461     res = phi_merge (block, pos, mode, nin, ins);
1463   } else {  /* case 1 */
1464     /* The block is not mature, we don't know how many in's are needed.  A Phi
1465        with zero predecessors is created.  Such a Phi node is called Phi0
1466        node.  (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1467        to the list of Phi0 nodes in this block to be matured by mature_immBlock
1469        The Phi0 has to remember the pos of its internal value.  If the real
1470        Phi is computed, pos is used to update the array with the local
1473     res = new_rd_Phi0 (current_ir_graph, block, mode);
1474     res->attr.phi0_pos = pos;
1475     res->link = block->link;
1479   /* If we get here, the frontend missed a use-before-definition error */
1482   printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
1483   assert (mode->code >= irm_F && mode->code <= irm_P);
1484   res = new_rd_Const (NULL, current_ir_graph, block, mode,
1485                       tarval_mode_null[mode->code]);
1488   /* The local valid value is available now. */
1489   block->attr.block.graph_arr[pos] = res;
1497 it starts the recursion. This causes an Id at the entry of
1498 every block that has no definition of the value! **/
1500 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy Phi_in-stack interface: this code path keeps no explicit stack,
   so creation returns NULL and freeing is a no-op.  NOTE(review): these
   appear inside a USE_EXPLICIT_PHI_IN_STACK conditional (line 1500);
   confirm which branch they belong to in the full file. */
1502 INLINE Phi_in_stack * new_Phi_in_stack() {  return NULL; }
1503 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Second variant of the Phi constructor: additionally receives the Phi0
   placeholder `phi0` the new Phi will replace, so self-references through
   the Phi0 can be redirected to the new node right away.  Bad inputs are
   ignored when deciding whether the Phi degenerates to a single operand. */
1506 static INLINE ir_node *
1507 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1508            ir_node **in, int ins, ir_node *phi0)
1511   ir_node *res, *known;
1513   /* Allocate a new node on the obstack.  The allocation copies the in
1515   res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1516   res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1518   /* This loop checks whether the Phi has more than one predecessor.
1519      If so, it is a real Phi node and we break the loop.  Else the
1520      Phi node merges the same definition on several paths and therefore
1521      is not needed.  Don't consider Bad nodes! */
1523   for (i=0;  i < ins;  ++i)
1527     in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */
1529     /* Optimize self referencing Phis:  We can't detect them yet properly, as
1530        they still refer to the Phi0 they will replace.  So replace right now. */
1531     if (phi0 && in[i] == phi0) in[i] = res;
1533     if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1541   /* i==ins: there is at most one predecessor, we don't need a phi node. */
1544     obstack_free (current_ir_graph->obst, res);
1545     if (is_Phi(known)) {
1546       /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1547          order, an enclosing Phi node may get superfluous. */
1548       res = optimize_in_place_2(known);
1549       if (res != known) { exchange(known, res); }
       /* No usable predecessor at all: */
1554     /* An undefined value, e.g., in unreachable code. */
1558   res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
1559   IRN_VRFY_IRG(res, irg);
1560   /* Memory Phis in endless loops must be kept alive.
1561      As we can't distinguish these easily we keep all of them alive. */
1562   if ((res->op == op_Phi) && (mode == mode_M))
1563     add_End_keepalive(irg->end, res);
1570 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1572 #if PRECISE_EXC_CONTEXT
1574 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1576 /* Construct a new frag_array for node n.
1577    Copy the content from the current graph_arr of the corresponding block:
1578    this is the current state.
1579    Set ProjM(n) as current memory state.
1580    Further the last entry in frag_arr of current block points to n.  This
1581    constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1583 static INLINE ir_node ** new_frag_arr (ir_node *n)
     /* n_loc pointer slots on the graph's obstack, seeded from graph_arr. */
1588   arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1589   memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1590      sizeof(ir_node *)*current_ir_graph->n_loc);
1592   /* turn off optimization before allocating Proj nodes, as res isn't
1594   opt = get_opt_optimize(); set_optimize(0);
1595   /* Here we rely on the fact that all frag ops have Memory as first result! */
1596   if (get_irn_op(n) == op_Call)
1597     arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
     /* For all other fragile ops the memory Proj number is identical,
        which the assert below verifies; pn_Alloc_M stands for all of them. */
1599     assert((pn_Quot_M == pn_DivMod_M) &&
1600            (pn_Quot_M == pn_Div_M)    &&
1601            (pn_Quot_M == pn_Mod_M)    &&
1602            (pn_Quot_M == pn_Load_M)   &&
1603            (pn_Quot_M == pn_Store_M)  &&
1604            (pn_Quot_M == pn_Alloc_M)  );
1605     arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
     /* Link n as the block's newest fragile op (slot n_loc-1 heads the chain). */
1609   current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1614  * returns the frag_arr from a node
     /* Dispatch on opcode to the attribute union member that stores the
        frag_arr.  NOTE(review): the case labels themselves are on lines
        elided from this excerpt; the returns below pair with iro_Call,
        iro_Alloc, iro_Load, iro_Store and a default/except case --
        confirm against the full file. */
1616 static INLINE ir_node **
1617 get_frag_arr (ir_node *n) {
1618   switch (get_irn_opcode(n)) {
1620     return n->attr.call.exc.frag_arr;
1622     return n->attr.a.exc.frag_arr;
1624     return n->attr.load.exc.frag_arr;
1626     return n->attr.store.exc.frag_arr;
1628     return n->attr.except.frag_arr;
/* Records `val` at `pos` in a frag_arr and propagates it along the chain
   of fragile ops (slot n_loc-1 links to the next op's frag_arr) without
   overwriting already-set entries.  NOTE(review): two alternative bodies
   are visible here -- a recursive one and an iteration-bounded loop
   (limit 1000); the preprocessor line selecting between them is elided
   from this excerpt. */
1633 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1635   if (!frag_arr[pos]) frag_arr[pos] = val;
1636   if (frag_arr[current_ir_graph->n_loc - 1]) {
1637     ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1638     assert(arr != frag_arr && "Endless recursion detected");
1639     set_frag_value(arr, pos, val);
1644   for (i = 0; i < 1000; ++i) {
1645     if (!frag_arr[pos]) {
1646       frag_arr[pos] = val;
1648     if (frag_arr[current_ir_graph->n_loc - 1]) {
1649       ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
     /* Chain longer than the loop bound means a cycle in the frag chain. */
1655   assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but for the state *after* the fragile op
   `cfOp`: reads the value from cfOp's frag_arr, falling back to the
   block's normal value (building a Phi/Phi0 if a later set_value
   intervened). */
1660 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1664   assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1666   frag_arr = get_frag_arr(cfOp);
1667   res = frag_arr[pos];
     /* Value missing in the frag_arr -- reconstruct it: */
1669     if (block->attr.block.graph_arr[pos]) {
1670       /* There was a set_value after the cfOp and no get_value before that
1671          set_value.  We must build a Phi node now. */
1672       if (block->attr.block.matured) {
1673         int ins = get_irn_arity(block);
1675         NEW_ARR_A (ir_node *, nin, ins);
1676         res = phi_merge(block, pos, mode, nin, ins);
       /* Immature block: register a Phi0 to be completed in mature_immBlock. */
1678         res = new_rd_Phi0 (current_ir_graph, block, mode);
1679         res->attr.phi0_pos = pos;
1680         res->link = block->link;
1684       /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1685          but this should be better: (remove comment if this works) */
1686       /* It's a Phi, we can write this into all graph_arrs with NULL */
1687       set_frag_value(block->attr.block.graph_arr, pos, res);
1689       res = get_r_value_internal(block, pos, mode);
1690       set_frag_value(block->attr.block.graph_arr, pos, res);
1698    computes the predecessors for the real phi node, and then
1699    allocates and returns this node.  The routine called to allocate the
1700    node might optimize it away and return a real value.
1701    This function must be called with an in-array of proper size. **/
/* Second phi_merge variant: pre-registers a Phi0 placeholder in graph_arr
   to break recursion cycles before collecting the operands, handles Bad
   control-flow predecessors, and is aware of precise exception contexts
   (fragile ops). */
1703 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1705   ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1708   /* If this block has no value at pos create a Phi0 and remember it
1709      in graph_arr to break recursions.
1710      Else we may not set graph_arr as there a later value is remembered. */
1712   if (!block->attr.block.graph_arr[pos]) {
1713     if (block == get_irg_start_block(current_ir_graph)) {
1714       /* Collapsing to Bad tarvals is no good idea.
1715          So we call a user-supplied routine here that deals with this case as
1716          appropriate for the given language. Sadly the only help we can give
1717          here is the position.
1719          Even if all variables are defined before use, it can happen that
1720          we get to the start block, if a cond has been replaced by a tuple
1721          (bad, jmp).  In this case we call the function needlessly, eventually
1722          generating a non-existent error.
1723      However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1726       if (default_initialize_local_variable)
1727     block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1729     block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1730       /* We don't need to care about exception ops in the start block.
1731      There are none by definition. */
1732       return block->attr.block.graph_arr[pos];
       /* Ordinary block: plant the Phi0 placeholder. */
1734       phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1735       block->attr.block.graph_arr[pos] = phi0;
1736 #if PRECISE_EXC_CONTEXT
1737       if (get_opt_precise_exc_context()) {
1738     /* Set graph_arr for fragile ops.  Also here we should break recursion.
1739        We could choose a cyclic path through a cfop.  But the recursion would
1740        break at some point. */
1741     set_frag_value(block->attr.block.graph_arr, pos, phi0);
1747   /* This loop goes to all predecessor blocks of the block the Phi node
1748      is in and there finds the operands of the Phi node by calling
1749      get_r_value_internal.  */
1750   for (i = 1;  i <= ins;  ++i) {
1751     prevCfOp = skip_Proj(block->in[i]);
1753     if (is_Bad(prevCfOp)) {
1754       /* In case a Cond has been optimized we would get right to the start block
1755      with an invalid definition. */
1756       nin[i-1] = new_Bad();
1759     prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1761     if (!is_Bad(prevBlock)) {
1762 #if PRECISE_EXC_CONTEXT
1763       if (get_opt_precise_exc_context() &&
1764       is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1765     assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1766     nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1769     nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1771       nin[i-1] = new_Bad();
1775   /* We want to pass the Phi0 node to the constructor: this finds additional
1776      optimization possibilities.
1777      The Phi0 node either is allocated in this function, or it comes from
1778      a former call to get_r_value_internal.  In this case we may not yet
1779      exchange phi0, as this is done in mature_immBlock. */
1781     phi0_all = block->attr.block.graph_arr[pos];
     /* Only treat it as the Phi0 placeholder if it really is an arity-0
        Phi belonging to this block; otherwise pass NULL below. */
1782     if (!((get_irn_op(phi0_all) == op_Phi) &&
1783       (get_irn_arity(phi0_all) == 0)   &&
1784       (get_nodes_block(phi0_all) == block)))
1790   /* After collecting all predecessors into the array nin a new Phi node
1791      with these predecessors is created.  This constructor contains an
1792      optimization: If all predecessors of the Phi node are identical it
1793      returns the only operand instead of a new Phi node.  */
1794   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1796   /* In case we allocated a Phi0 node at the beginning of this procedure,
1797      we need to exchange this Phi0 with the real Phi. */
1799     exchange(phi0, res);
1800     block->attr.block.graph_arr[pos] = res;
1801     /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
1802        only an optimization. */
1808 /* This function returns the last definition of a variable.  In case
1809    this variable was last defined in a previous block, Phi nodes are
1810    inserted.  If the part of the firm graph containing the definition
1811    is not yet constructed, a dummy Phi node is returned. */
1813 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1816   /* There are 4 cases to treat.
1818      1. The block is not mature and we visit it the first time.  We can not
1819         create a proper Phi node, therefore a Phi0, i.e., a Phi without
1820         predecessors is returned.  This node is added to the linked list (field
1821         "link") of the containing block to be completed when this block is
1822         matured. (Completion will add a new Phi and turn the Phi0 into an Id
1825      2. The value is already known in this block, graph_arr[pos] is set and we
1826         visit the block the first time.  We can return the value without
1827         creating any new nodes.
1829      3. The block is mature and we visit it the first time.  A Phi node needs
1830         to be created (phi_merge).  If the Phi is not needed, as all its
1831         operands are the same value reaching the block through different
1832         paths, it's optimized away and the value itself is returned.
1834      4. The block is mature, and we visit it the second time.  Now two
1835         subcases are possible:
1836         * The value was computed completely the last time we were here.  This
1837           is the case if there is no loop.  We can return the proper value.
1838         * The recursion that visited this node and set the flag did not
1839           return yet.  We are computing a value in a loop and need to
1840           break the recursion.  This case only happens if we visited
1841       the same block with phi_merge before, which inserted a Phi0.
1842       So we return the Phi0.
1845   /* case 4 -- already visited. */
1846   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1847     /* As phi_merge allocates a Phi0 this value is always defined.  Here
1848      is the critical difference of the two algorithms. */
1849     assert(block->attr.block.graph_arr[pos]);
1850     return block->attr.block.graph_arr[pos];
1853   /* visited the first time */
1854   set_irn_visited(block, get_irg_visited(current_ir_graph));
1856   /* Get the local valid value */
1857   res = block->attr.block.graph_arr[pos];
1859   /* case 2 -- If the value is actually computed, return it. */
1860   if (res) { return res; };
1862   if (block->attr.block.matured) { /* case 3 */
1864     /* The Phi has the same amount of ins as the corresponding block. */
1865     int ins = get_irn_arity(block);
1867     NEW_ARR_A (ir_node *, nin, ins);
1869     /* Phi merge collects the predecessors and then creates a node. */
1870     res = phi_merge (block, pos, mode, nin, ins);
1872   } else {  /* case 1 */
1873     /* The block is not mature, we don't know how many in's are needed.  A Phi
1874        with zero predecessors is created.  Such a Phi node is called Phi0
1875        node.  The Phi0 is then added to the list of Phi0 nodes in this block
1876        to be matured by mature_immBlock later.
1877        The Phi0 has to remember the pos of its internal value.  If the real
1878        Phi is computed, pos is used to update the array with the local
1880     res = new_rd_Phi0 (current_ir_graph, block, mode);
1881     res->attr.phi0_pos = pos;
1882     res->link = block->link;
1886   /* If we get here, the frontend missed a use-before-definition error */
1889   printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
1890   assert (mode->code >= irm_F && mode->code <= irm_P);
1891   res = new_rd_Const (NULL, current_ir_graph, block, mode,
1892               get_mode_null(mode));
1895   /* The local valid value is available now. */
1896   block->attr.block.graph_arr[pos] = res;
1901 #endif /* USE_FAST_PHI_CONSTRUCTION */
1903 /* ************************************************************************** */
1905 /** Finalize a Block node, when all control flows are known. */
1906 /** Acceptable parameters are only Block nodes. */
/* Finalizes an immature block once all control-flow predecessors are
   known: fixes the backedge array, upgrades every queued Phi0 on the
   block's link list into a real Phi via phi_merge, marks the block
   matured and optimizes it in place. */
1906 mature_immBlock (ir_node *block)
1915   assert (get_irn_opcode(block) == iro_Block);
1916   /* @@@ should be commented in
1917      assert (!get_Block_matured(block) && "Block already matured"); */
1919   if (!get_Block_matured(block)) {
1920     ins = ARR_LEN (block->in)-1;
1921     /* Fix block parameters */
1922     block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1924     /* An array for building the Phi nodes. */
1925     NEW_ARR_A (ir_node *, nin, ins);
1927     /* Traverse a chain of Phi nodes attached to this block and mature
1929     for (n = block->link;  n;  n=next) {
1930       inc_irg_visited(current_ir_graph);
1932       exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1935     block->attr.block.matured = 1;
1937     /* Now, as the block is a finished firm node, we can optimize it.
1938        Since other nodes have been allocated since the block was created
1939        we can not free the node on the obstack.  Therefore we have to call
1941        Unfortunately the optimization does not change a lot, as all allocated
1942        nodes refer to the unoptimized node.
1943        We can call _2, as global cse has no effect on blocks. */
1944     block = optimize_in_place_2(block);
1945     IRN_VRFY_IRG(block, current_ir_graph);
/* Thin debug-info wrappers: each new_d_* constructor forwards to the
   corresponding new_rd_* constructor in the current graph.  Most use
   current_block; new_d_Const and new_d_Const_type use start_block
   (constants are placed in the start block). */
1950 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1952   return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1957 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1959   return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1964 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1966   return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1972 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1974   return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1979 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1981   return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* Marks the Cond `arg` as fragmentary with default projection `max_proj`
   and creates the matching mode_X Proj. */
1986 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1989   assert(arg->op == op_Cond);
1990   arg->attr.c.kind = fragmentary;
1991   arg->attr.c.default_proj = max_proj;
1992   res = new_Proj (arg, mode_X, max_proj);
1997 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1999   return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2004 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2006   return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2010 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2012   return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2019   return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2024 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2026   return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2032 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2034   return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2039 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2041   return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2046  * allocate the frag array
     /* Attaches a fresh frag_arr to `res` (storing it through `frag_store`)
        when precise exception contexts are enabled, the graph is still
        being built, the node was not optimized into a different op, and
        no frag_arr was installed yet (e.g. by CSE). */
2048 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2049   if (get_opt_precise_exc_context()) {
2050     if ((current_ir_graph->phase_state == phase_building) &&
2051     (get_irn_op(res) == op) && /* Could be optimized away. */
2052     !*frag_store)    /* Could be a cse where the arr is already set. */ {
2053       *frag_store = new_frag_arr(res);
/* Fragile arithmetic wrappers (Quot/DivMod/Div/Mod): forward to the
   new_rd_* constructor, pin the node, and -- under PRECISE_EXC_CONTEXT --
   allocate a frag_arr so exception edges see a consistent memory state. */
2060 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2063   res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2065   res->attr.except.pin_state = op_pin_state_pinned;
2066 #if PRECISE_EXC_CONTEXT
2067   allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
2074 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2077   res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2079   res->attr.except.pin_state = op_pin_state_pinned;
2080 #if PRECISE_EXC_CONTEXT
2081   allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
2088 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2091   res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2093   res->attr.except.pin_state = op_pin_state_pinned;
2094 #if PRECISE_EXC_CONTEXT
2095   allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
2102 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2105   res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2107   res->attr.except.pin_state = op_pin_state_pinned;
2108 #if PRECISE_EXC_CONTEXT
2109   allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
/* Bitwise/shift/compare/control wrappers: straight forwarders to the
   matching new_rd_* constructor in the current block of the current graph. */
2116 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2118   return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2123 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2125   return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2130 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2132   return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2137 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2139   return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2144 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2146   return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2151 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2153   return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2158 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2160   return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2165 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2167   return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2172 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2174   return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2179 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2181   return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2186 new_d_Jmp (dbg_info* db)
2188   return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2192 new_d_Cond (dbg_info* db, ir_node *c)
2194   return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Call/memory/selection/interprocedural wrappers.  Fragile ops (Call,
   Load, Store, Alloc) additionally get a frag_arr under
   PRECISE_EXC_CONTEXT.  SymConsts are placed in the start block. */
2198 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2202   res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2203              store, callee, arity, in, tp);
2204 #if PRECISE_EXC_CONTEXT
2205   allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
2212 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2214   return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2219 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2221   return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2226 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2229   res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2231 #if PRECISE_EXC_CONTEXT
2232   allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
2239 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2242   res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2244 #if PRECISE_EXC_CONTEXT
2245   allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
2252 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2256   res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2257               store, size, alloc_type, where);
2258 #if PRECISE_EXC_CONTEXT
2259   allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
2266 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2268   return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2269              store, ptr, size, free_type);
2273 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2274 /* GL: objptr was called frame before.  Frame was a bad choice for the name
2275    as the operand could as well be a pointer to a dynamic object. */
2277   return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2278             store, objptr, 0, NULL, ent);
2282 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2284   return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2285             store, objptr, n_index, index, sel);
2289 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2291   return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2292                          store, objptr, ent));
2296 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2298   return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2303 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2305   return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2310 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2312   return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2320   return __new_d_Bad();
2324 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2326   return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2331 new_d_Unknown (ir_mode *m)
2333   return new_rd_Unknown(current_ir_graph, m);
2337 new_d_CallBegin (dbg_info *db, ir_node *call)
2340   res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2345 new_d_EndReg (dbg_info *db)
2348   res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2353 new_d_EndExcept (dbg_info *db)
2356   res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2361 new_d_Break (dbg_info *db)
2363   return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2367 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2369   return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2374 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2378   res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2379              callee, arity, in, tp);
2384 /* ********************************************************************* */
2385 /* Comfortable interface with automatic Phi node construction. */
2386 /* (Uses also constructors of ?? interface, except new_Block. */
2387 /* ********************************************************************* */
2389 /* * Block construction **/
2390 /* immature Block without predecessors */
/* Creates an immature block (dynamic in-array, arity -1), makes it the
   current block, and sets up the graph_arr used for Phi construction.
   Immature blocks must not be optimized; mature_immBlock finishes them. */
2391 ir_node *new_d_immBlock (dbg_info* db) {
2394   assert(get_irg_phase_state (current_ir_graph) == phase_building);
2395   /* creates a new dynamic in-array as length of in is -1 */
2396   res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2397   current_ir_graph->current_block = res;
2398   res->attr.block.matured     = 0;
2399   /* res->attr.block.exc = exc_normal; */
2400   /* res->attr.block.handler_entry = 0; */
2401   res->attr.block.irg         = current_ir_graph;
2402   res->attr.block.backedge    = NULL;
2403   res->attr.block.in_cg       = NULL;
2404   res->attr.block.cg_backedge = NULL;
2405   set_Block_block_visited(res, 0);
2407   /* Create and initialize array for Phi-node construction. */
2408   res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2409                                          current_ir_graph->n_loc);
2410   memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2412   /* Immature block may not be optimized! */
2413   IRN_VRFY_IRG(res, current_ir_graph);
2419 new_immBlock (void) {
2420 return new_d_immBlock(NULL);
2423 /* add an edge to a jmp/control flow node */
2425 add_immBlock_pred (ir_node *block, ir_node *jmp)
2427 if (block->attr.block.matured) {
2428 assert(0 && "Error: Block already matured!\n");
2431 assert(jmp != NULL);
2432 ARR_APP1(ir_node *, block->in, jmp);
2436 /* changing the current block */
2438 set_cur_block (ir_node *target)
2440 current_ir_graph->current_block = target;
2443 /* ************************ */
2444 /* parameter administration */
2446 /* get a value from the parameter array from the current block by its index */
2448 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2450 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2451 inc_irg_visited(current_ir_graph);
2453 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2455 /* get a value from the parameter array from the current block by its index */
2457 get_value (int pos, ir_mode *mode)
2459 return get_d_value(NULL, pos, mode);
2462 /* set a value at position pos in the parameter array from the current block */
2464 set_value (int pos, ir_node *value)
2466 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2467 assert(pos+1 < current_ir_graph->n_loc);
2468 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2471 /* get the current store */
2475 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2476 /* GL: one could call get_value instead */
2477 inc_irg_visited(current_ir_graph);
2478 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2481 /* set the current store */
2483 set_store (ir_node *store)
2485 /* GL: one could call set_value instead */
2486 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2487 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2491 keep_alive (ir_node *ka)
2493 add_End_keepalive(current_ir_graph->end, ka);
2496 /** Useful access routines **/
2497 /* Returns the current block of the current graph. To set the current
2498 block use set_cur_block. */
2499 ir_node *get_cur_block() {
2500 return get_irg_current_block(current_ir_graph);
2503 /* Returns the frame type of the current graph */
2504 type *get_cur_frame_type() {
2505 return get_irg_frame_type(current_ir_graph);
2509 /* ********************************************************************* */
2512 /* call once for each run of the library */
2514 init_cons (default_initialize_local_variable_func_t *func)
2516 default_initialize_local_variable = func;
2519 /* call for each graph */
2521 finalize_cons (ir_graph *irg) {
2522 irg->phase_state = phase_high;
2526 ir_node *new_Block(int arity, ir_node **in) {
2527 return new_d_Block(NULL, arity, in);
2529 ir_node *new_Start (void) {
2530 return new_d_Start(NULL);
2532 ir_node *new_End (void) {
2533 return new_d_End(NULL);
2535 ir_node *new_Jmp (void) {
2536 return new_d_Jmp(NULL);
2538 ir_node *new_Cond (ir_node *c) {
2539 return new_d_Cond(NULL, c);
2541 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2542 return new_d_Return(NULL, store, arity, in);
2544 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2545 return new_d_Raise(NULL, store, obj);
2547 ir_node *new_Const (ir_mode *mode, tarval *con) {
2548 return new_d_Const(NULL, mode, con);
2550 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2551 return new_d_SymConst(NULL, value, kind);
2553 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2554 return new_d_simpleSel(NULL, store, objptr, ent);
2556 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2558 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2560 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2561 return new_d_InstOf (NULL, store, objptr, ent);
2563 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2565 return new_d_Call(NULL, store, callee, arity, in, tp);
2567 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2568 return new_d_Add(NULL, op1, op2, mode);
2570 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2571 return new_d_Sub(NULL, op1, op2, mode);
2573 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2574 return new_d_Minus(NULL, op, mode);
2576 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2577 return new_d_Mul(NULL, op1, op2, mode);
2579 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2580 return new_d_Quot(NULL, memop, op1, op2);
2582 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2583 return new_d_DivMod(NULL, memop, op1, op2);
2585 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2586 return new_d_Div(NULL, memop, op1, op2);
2588 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2589 return new_d_Mod(NULL, memop, op1, op2);
2591 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2592 return new_d_Abs(NULL, op, mode);
2594 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2595 return new_d_And(NULL, op1, op2, mode);
2597 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2598 return new_d_Or(NULL, op1, op2, mode);
2600 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2601 return new_d_Eor(NULL, op1, op2, mode);
2603 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2604 return new_d_Not(NULL, op, mode);
2606 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2607 return new_d_Shl(NULL, op, k, mode);
2609 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2610 return new_d_Shr(NULL, op, k, mode);
2612 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2613 return new_d_Shrs(NULL, op, k, mode);
2615 #define new_Rotate new_Rot
2616 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2617 return new_d_Rot(NULL, op, k, mode);
2619 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2620 return new_d_Cmp(NULL, op1, op2);
2622 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2623 return new_d_Conv(NULL, op, mode);
2625 ir_node *new_Cast (ir_node *op, type *to_tp) {
2626 return new_d_Cast(NULL, op, to_tp);
2628 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2629 return new_d_Phi(NULL, arity, in, mode);
2631 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2632 return new_d_Load(NULL, store, addr, mode);
2634 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2635 return new_d_Store(NULL, store, addr, val);
2637 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2638 where_alloc where) {
2639 return new_d_Alloc(NULL, store, size, alloc_type, where);
2641 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2643 return new_d_Free(NULL, store, ptr, size, free_type);
2645 ir_node *new_Sync (int arity, ir_node **in) {
2646 return new_d_Sync(NULL, arity, in);
2648 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2649 return new_d_Proj(NULL, arg, mode, proj);
2651 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2652 return new_d_defaultProj(NULL, arg, max_proj);
2654 ir_node *new_Tuple (int arity, ir_node **in) {
2655 return new_d_Tuple(NULL, arity, in);
2657 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2658 return new_d_Id(NULL, val, mode);
2660 ir_node *new_Bad (void) {
2663 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2664 return new_d_Confirm (NULL, val, bound, cmp);
2666 ir_node *new_Unknown(ir_mode *m) {
2667 return new_d_Unknown(m);
2669 ir_node *new_CallBegin (ir_node *callee) {
2670 return new_d_CallBegin(NULL, callee);
2672 ir_node *new_EndReg (void) {
2673 return new_d_EndReg(NULL);
2675 ir_node *new_EndExcept (void) {
2676 return new_d_EndExcept(NULL);
2678 ir_node *new_Break (void) {
2679 return new_d_Break(NULL);
2681 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2682 return new_d_Filter(NULL, arg, mode, proj);
2684 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2685 return new_d_FuncCall(NULL, callee, arity, in, tp);