3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependant initialization variable
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** privat interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 return new_rd_Const_type (db, irg, block, mode, con, tp);
162 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
166 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
167 res = optimize_node(res);
168 IRN_VRFY_IRG(res, irg);
173 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
178 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
179 res->attr.proj = proj;
182 assert(get_Proj_pred(res));
183 assert(get_nodes_block(get_Proj_pred(res)));
185 res = optimize_node(res);
187 IRN_VRFY_IRG(res, irg);
193 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
197 assert(arg->op == op_Cond);
198 arg->attr.c.kind = fragmentary;
199 arg->attr.c.default_proj = max_proj;
200 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
205 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
210 res = optimize_node(res);
211 IRN_VRFY_IRG(res, irg);
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
220 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
221 res->attr.cast.totype = to_tp;
222 res = optimize_node(res);
223 IRN_VRFY_IRG(res, irg);
228 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
232 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
233 res = optimize_node (res);
234 IRN_VRFY_IRG(res, irg);
239 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
240 ir_node *op1, ir_node *op2, ir_mode *mode)
247 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
248 res = optimize_node(res);
249 IRN_VRFY_IRG(res, irg);
254 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
255 ir_node *op1, ir_node *op2, ir_mode *mode)
262 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
263 res = optimize_node (res);
264 IRN_VRFY_IRG(res, irg);
269 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
270 ir_node *op, ir_mode *mode)
274 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
275 res = optimize_node(res);
276 IRN_VRFY_IRG(res, irg);
281 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
282 ir_node *op1, ir_node *op2, ir_mode *mode)
289 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
290 res = optimize_node(res);
291 IRN_VRFY_IRG(res, irg);
296 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
297 ir_node *memop, ir_node *op1, ir_node *op2)
305 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
306 res = optimize_node(res);
307 IRN_VRFY_IRG(res, irg);
312 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
313 ir_node *memop, ir_node *op1, ir_node *op2)
321 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
322 res = optimize_node(res);
323 IRN_VRFY_IRG(res, irg);
328 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *memop, ir_node *op1, ir_node *op2)
337 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
338 res = optimize_node(res);
339 IRN_VRFY_IRG(res, irg);
344 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
345 ir_node *memop, ir_node *op1, ir_node *op2)
353 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
354 res = optimize_node(res);
355 IRN_VRFY_IRG(res, irg);
360 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
368 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
369 res = optimize_node(res);
370 IRN_VRFY_IRG(res, irg);
375 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
376 ir_node *op1, ir_node *op2, ir_mode *mode)
383 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
384 res = optimize_node(res);
385 IRN_VRFY_IRG(res, irg);
390 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
391 ir_node *op1, ir_node *op2, ir_mode *mode)
398 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
399 res = optimize_node (res);
400 IRN_VRFY_IRG(res, irg);
405 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
406 ir_node *op, ir_mode *mode)
410 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_node *k, ir_mode *mode)
425 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
433 ir_node *op, ir_node *k, ir_mode *mode)
440 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
447 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
448 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
470 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
478 ir_node *op, ir_mode *mode)
482 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
483 res = optimize_node (res);
484 IRN_VRFY_IRG(res, irg);
489 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *op1, ir_node *op2)
497 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
498 res = optimize_node(res);
499 IRN_VRFY_IRG(res, irg);
504 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
508 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
509 res = optimize_node (res);
510 IRN_VRFY_IRG (res, irg);
515 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
519 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
520 res->attr.c.kind = dense;
521 res->attr.c.default_proj = 0;
522 res = optimize_node (res);
523 IRN_VRFY_IRG(res, irg);
528 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
529 ir_node *callee, int arity, ir_node **in, type *tp)
536 NEW_ARR_A(ir_node *, r_in, r_arity);
539 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
541 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
543 assert(is_method_type(tp));
544 set_Call_type(res, tp);
545 res->attr.call.callee_arr = NULL;
546 res = optimize_node(res);
547 IRN_VRFY_IRG(res, irg);
552 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
553 ir_node *store, int arity, ir_node **in)
560 NEW_ARR_A (ir_node *, r_in, r_arity);
562 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
563 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
570 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
577 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
578 res = optimize_node(res);
579 IRN_VRFY_IRG(res, irg);
584 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
585 ir_node *store, ir_node *adr, ir_mode *mode)
592 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
593 res->attr.load.load_mode = mode;
594 res->attr.load.volatility = volatility_non_volatile;
595 res = optimize_node(res);
596 IRN_VRFY_IRG(res, irg);
601 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
602 ir_node *store, ir_node *adr, ir_node *val)
610 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
611 res->attr.store.volatility = volatility_non_volatile;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
618 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
619 ir_node *size, type *alloc_type, where_alloc where)
626 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
627 res->attr.a.where = where;
628 res->attr.a.type = alloc_type;
629 res = optimize_node(res);
630 IRN_VRFY_IRG(res, irg);
635 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
636 ir_node *ptr, ir_node *size, type *free_type)
644 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
645 res->attr.f = free_type;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
652 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
653 int arity, ir_node **in, entity *ent)
659 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
662 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
665 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
666 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
667 res->attr.s.ent = ent;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);
674 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
675 ir_node *objptr, type *ent)
682 NEW_ARR_A(ir_node *, r_in, r_arity);
686 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
687 res->attr.io.ent = ent;
689 /* res = optimize(res); */
690 IRN_VRFY_IRG(res, irg);
695 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
696 symconst_kind symkind, type *tp)
701 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
705 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
707 res->attr.i.num = symkind;
708 res->attr.i.sym = value;
711 res = optimize_node(res);
712 IRN_VRFY_IRG(res, irg);
717 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind)
720 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
724 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
725 symconst_symbol sym = {(type *)symbol};
726 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
729 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
730 symconst_symbol sym = {(type *)symbol};
731 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
734 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
735 symconst_symbol sym = {symbol};
736 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
739 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
740 symconst_symbol sym = {symbol};
741 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
745 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
749 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
756 new_rd_Bad (ir_graph *irg)
762 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
764 ir_node *in[2], *res;
768 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
769 res->attr.confirm_cmp = cmp;
770 res = optimize_node (res);
771 IRN_VRFY_IRG(res, irg);
776 new_rd_Unknown (ir_graph *irg, ir_mode *m)
778 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
782 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
787 in[0] = get_Call_ptr(call);
788 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
789 /* res->attr.callbegin.irg = irg; */
790 res->attr.callbegin.call = call;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
797 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
801 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
803 IRN_VRFY_IRG(res, irg);
808 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
812 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
813 irg->end_except = res;
814 IRN_VRFY_IRG (res, irg);
819 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
823 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
830 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
835 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
836 res->attr.filter.proj = proj;
837 res->attr.filter.in_cg = NULL;
838 res->attr.filter.backedge = NULL;
841 assert(get_Proj_pred(res));
842 assert(get_nodes_block(get_Proj_pred(res)));
844 res = optimize_node(res);
845 IRN_VRFY_IRG(res, irg);
851 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
852 ir_node *callee, int arity, ir_node **in, type *tp)
859 NEW_ARR_A(ir_node *, r_in, r_arity);
861 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
863 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
865 assert(is_method_type(tp));
866 set_FuncCall_type(res, tp);
867 res->attr.call.callee_arr = NULL;
868 res = optimize_node(res);
869 IRN_VRFY_IRG(res, irg);
874 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
875 return new_rd_Block(NULL, irg, arity, in);
877 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
878 return new_rd_Start(NULL, irg, block);
880 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
881 return new_rd_End(NULL, irg, block);
883 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
884 return new_rd_Jmp(NULL, irg, block);
886 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
887 return new_rd_Cond(NULL, irg, block, c);
889 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
890 ir_node *store, int arity, ir_node **in) {
891 return new_rd_Return(NULL, irg, block, store, arity, in);
893 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
894 ir_node *store, ir_node *obj) {
895 return new_rd_Raise(NULL, irg, block, store, obj);
897 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
898 ir_mode *mode, tarval *con) {
899 return new_rd_Const(NULL, irg, block, mode, con);
901 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
902 symconst_symbol value, symconst_kind symkind) {
903 return new_rd_SymConst(NULL, irg, block, value, symkind);
905 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
906 ir_node *objptr, int n_index, ir_node **index,
908 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
910 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
912 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
914 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
915 ir_node *callee, int arity, ir_node **in,
917 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
919 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
920 ir_node *op1, ir_node *op2, ir_mode *mode) {
921 return new_rd_Add(NULL, irg, block, op1, op2, mode);
923 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
924 ir_node *op1, ir_node *op2, ir_mode *mode) {
925 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
927 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
928 ir_node *op, ir_mode *mode) {
929 return new_rd_Minus(NULL, irg, block, op, mode);
931 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
932 ir_node *op1, ir_node *op2, ir_mode *mode) {
933 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
935 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
936 ir_node *memop, ir_node *op1, ir_node *op2) {
937 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
939 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
940 ir_node *memop, ir_node *op1, ir_node *op2) {
941 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
943 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
944 ir_node *memop, ir_node *op1, ir_node *op2) {
945 return new_rd_Div(NULL, irg, block, memop, op1, op2);
947 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
948 ir_node *memop, ir_node *op1, ir_node *op2) {
949 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
951 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
952 ir_node *op, ir_mode *mode) {
953 return new_rd_Abs(NULL, irg, block, op, mode);
955 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
956 ir_node *op1, ir_node *op2, ir_mode *mode) {
957 return new_rd_And(NULL, irg, block, op1, op2, mode);
959 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
960 ir_node *op1, ir_node *op2, ir_mode *mode) {
961 return new_rd_Or(NULL, irg, block, op1, op2, mode);
963 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
964 ir_node *op1, ir_node *op2, ir_mode *mode) {
965 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
967 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
968 ir_node *op, ir_mode *mode) {
969 return new_rd_Not(NULL, irg, block, op, mode);
971 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
972 ir_node *op1, ir_node *op2) {
973 return new_rd_Cmp(NULL, irg, block, op1, op2);
975 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
976 ir_node *op, ir_node *k, ir_mode *mode) {
977 return new_rd_Shl(NULL, irg, block, op, k, mode);
979 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
980 ir_node *op, ir_node *k, ir_mode *mode) {
981 return new_rd_Shr(NULL, irg, block, op, k, mode);
983 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
984 ir_node *op, ir_node *k, ir_mode *mode) {
985 return new_rd_Shrs(NULL, irg, block, op, k, mode);
987 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
988 ir_node *op, ir_node *k, ir_mode *mode) {
989 return new_rd_Rot(NULL, irg, block, op, k, mode);
991 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
992 ir_node *op, ir_mode *mode) {
993 return new_rd_Conv(NULL, irg, block, op, mode);
995 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
996 return new_rd_Cast(NULL, irg, block, op, to_tp);
998 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
999 ir_node **in, ir_mode *mode) {
1000 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1002 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1003 ir_node *store, ir_node *adr, ir_mode *mode) {
1004 return new_rd_Load(NULL, irg, block, store, adr, mode);
1006 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1007 ir_node *store, ir_node *adr, ir_node *val) {
1008 return new_rd_Store(NULL, irg, block, store, adr, val);
1010 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1011 ir_node *size, type *alloc_type, where_alloc where) {
1012 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1014 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1015 ir_node *ptr, ir_node *size, type *free_type) {
1016 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1018 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1019 return new_rd_Sync(NULL, irg, block, arity, in);
1021 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1022 ir_mode *mode, long proj) {
1023 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1025 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1027 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1029 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1030 int arity, ir_node **in) {
1031 return new_rd_Tuple(NULL, irg, block, arity, in );
1033 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1034 ir_node *val, ir_mode *mode) {
1035 return new_rd_Id(NULL, irg, block, val, mode);
1037 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1038 return new_rd_Bad(irg);
1040 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1041 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1043 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1044 return new_rd_Unknown(irg, m);
1046 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1047 return new_rd_CallBegin(NULL, irg, block, callee);
1049 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1050 return new_rd_EndReg(NULL, irg, block);
1052 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1053 return new_rd_EndExcept(NULL, irg, block);
1055 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1056 return new_rd_Break(NULL, irg, block);
1058 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1059 ir_mode *mode, long proj) {
1060 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1062 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1063 ir_node *callee, int arity, ir_node **in,
1065 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1069 /** ********************/
1070 /** public interfaces */
1071 /** construction tools */
1075 * - create a new Start node in the current block
1077 * @return s - pointer to the created Start node
1082 new_d_Start (dbg_info* db)
1086 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1087 op_Start, mode_T, 0, NULL);
1088 /* res->attr.start.irg = current_ir_graph; */
1090 res = optimize_node(res);
1091 IRN_VRFY_IRG(res, current_ir_graph);
1096 new_d_End (dbg_info* db)
1099 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1100 op_End, mode_X, -1, NULL);
1101 res = optimize_node(res);
1102 IRN_VRFY_IRG(res, current_ir_graph);
1107 /* Constructs a Block with a fixed number of predecessors.
1108 Does set current_block. Can be used with automatic Phi
1109 node construction. */
1111 new_d_Block (dbg_info* db, int arity, ir_node **in)
1115 bool has_unknown = false;
1117 res = new_rd_Block(db, current_ir_graph, arity, in);
1119 /* Create and initialize array for Phi-node construction. */
1120 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1121 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1122 current_ir_graph->n_loc);
1123 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1126 for (i = arity-1; i >= 0; i--)
1127 if (get_irn_op(in[i]) == op_Unknown) {
1132 if (!has_unknown) res = optimize_node(res);
1133 current_ir_graph->current_block = res;
1135 IRN_VRFY_IRG(res, current_ir_graph);
1140 /* ***********************************************************************/
1141 /* Methods necessary for automatic Phi node creation */
1143 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1144 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1145 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1146 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1148 Call Graph: ( A ---> B == A "calls" B)
1150 get_value mature_immBlock
1158 get_r_value_internal |
1162 new_rd_Phi0 new_rd_Phi_in
1164 * *************************************************************************** */
1166 /** Creates a Phi node with 0 predecessors */
1167 static INLINE ir_node *
1168 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1172 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1173 IRN_VRFY_IRG(res, irg);
1177 /* There are two implementations of the Phi node construction. The first
1178 is faster, but does not work for blocks with more than 2 predecessors.
1179 The second works always but is slower and causes more unnecessary Phi
1181 Select the implementations by the following preprocessor flag set in
1183 #if USE_FAST_PHI_CONSTRUCTION
1185 /* This is a stack used for allocating and deallocating nodes in
1186 new_rd_Phi_in. The original implementation used the obstack
1187 to model this stack, now it is explicit. This reduces side effects.
1189 #if USE_EXPLICIT_PHI_IN_STACK
1190 INLINE Phi_in_stack *
1191 new_Phi_in_stack(void) {
1194 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1196 res->stack = NEW_ARR_F (ir_node *, 0);
1203 free_Phi_in_stack(Phi_in_stack *s) {
1204 DEL_ARR_F(s->stack);
1208 free_to_Phi_in_stack(ir_node *phi) {
1209 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1210 current_ir_graph->Phi_in_stack->pos)
1211 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1213 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1215 (current_ir_graph->Phi_in_stack->pos)++;
1218 static INLINE ir_node *
1219 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1220 int arity, ir_node **in) {
1222 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1223 int pos = current_ir_graph->Phi_in_stack->pos;
1227 /* We need to allocate a new node */
1228 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1229 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1231 /* reuse the old node and initialize it again. */
1234 assert (res->kind == k_ir_node);
1235 assert (res->op == op_Phi);
1239 assert (arity >= 0);
1240 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1241 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1243 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1245 (current_ir_graph->Phi_in_stack->pos)--;
1249 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1251 /* Creates a Phi node with a given, fixed array **in of predecessors.
1252 If the Phi node is unnecessary, as the same value reaches the block
1253 through all control flow paths, it is eliminated and the value
1254 returned directly. This constructor is only intended for use in
1255 the automatic Phi node generation triggered by get_value or mature.
1256 The implementation is quite tricky and depends on the fact, that
1257 the nodes are allocated on a stack:
1258 The in array contains predecessors and NULLs. The NULLs appear,
1259 if get_r_value_internal, that computed the predecessors, reached
1260 the same block on two paths. In this case the same value reaches
1261 this block on both paths, there is no definition in between. We need
1262 not allocate a Phi where these path's merge, but we have to communicate
1263 this fact to the caller. This happens by returning a pointer to the
1264 node the caller _will_ allocate. (Yes, we predict the address. We can
1265 do so because the nodes are allocated on the obstack.) The caller then
1266 finds a pointer to itself and, when this routine is called again,
1269 static INLINE ir_node *
1270 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1273 ir_node *res, *known;
1275 /* Allocate a new node on the obstack. This can return a node to
1276 which some of the pointers in the in-array already point.
1277 Attention: the constructor copies the in array, i.e., the later
1278 changes to the array in this routine do not affect the
1279 constructed node! If the in array contains NULLs, there will be
1280 missing predecessors in the returned node. Is this a possible
1281 internal state of the Phi node generation? */
1282 #if USE_EXPLICIT_PHI_IN_STACK
1283 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1285 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1286 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1289 /* The in-array can contain NULLs. These were returned by
1290 get_r_value_internal if it reached the same block/definition on a
1291 second path. The NULLs are replaced by the node itself to
1292 simplify the test in the next loop. */
1293 for (i = 0; i < ins; ++i) {
1298 /* This loop checks whether the Phi has more than one predecessor.
1299 If so, it is a real Phi node and we break the loop. Else the Phi
1300 node merges the same definition on several paths and therefore is
1302 for (i = 0; i < ins; ++i)
1304 if (in[i] == res || in[i] == known) continue;
1312 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1314 #if USE_EXPLICIT_PHI_IN_STACK
1315 free_to_Phi_in_stack(res);
1317 obstack_free (current_ir_graph->obst, res);
1321 res = optimize_node (res);
1322 IRN_VRFY_IRG(res, irg);
1325 /* return the pointer to the Phi node. This node might be deallocated! */
1330 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1333 allocates and returns this node. The routine called to allocate the
1334 node might optimize it away and return a real value, or even a pointer
1335 to a deallocated Phi node on top of the obstack!
1336 This function is called with an in-array of proper size. **/
/* phi_merge: collect the value at `pos` from every predecessor block into
   `nin`, then let new_rd_Phi_in decide whether a real Phi is required. */
1338 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1340 ir_node *prevBlock, *res;
1343 /* This loop goes to all predecessor blocks of the block the Phi node is in
1344 and there finds the operands of the Phi node by calling
1345 get_r_value_internal. */
1346 for (i = 1; i <= ins; ++i) {
1347 assert (block->in[i]);
1348 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1350 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1353 /* After collecting all predecessors into the array nin a new Phi node
1354 with these predecessors is created. This constructor contains an
1355 optimization: If all predecessors of the Phi node are identical it
1356 returns the only operand instead of a new Phi node. If the value
1357 passes two different control flow edges without being defined, and
1358 this is the second path treated, a pointer to the node that will be
1359 allocated for the first path (recursion) is returned. We already
1360 know the address of this node, as it is the next node to be allocated
1361 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1362 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1364 /* Now we know the value for "pos" and can enter it in the array with
1365 all known local variables. Attention: this might be a pointer to
1366 a node, that later will be allocated!!! See new_rd_Phi_in.
1367 If this is called in mature, after some set_value in the same block,
1368 the proper value must not be overwritten:
1370 get_value (makes Phi0, puts it into graph_arr)
1371 set_value (overwrites Phi0 in graph_arr)
1372 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1375 if (!block->attr.block.graph_arr[pos]) {
1376 block->attr.block.graph_arr[pos] = res;
1378 /* printf(" value already computed by %s\n",
1379 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1385 /* This function returns the last definition of a variable. In case
1386 this variable was last defined in a previous block, Phi nodes are
1387 inserted. If the part of the firm graph containing the definition
1388 is not yet constructed, a dummy Phi node is returned. */
1390 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1393 /* There are 4 cases to treat.
1395 1. The block is not mature and we visit it the first time. We can not
1396 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1397 predecessors is returned. This node is added to the linked list (field
1398 "link") of the containing block to be completed when this block is
1399 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1402 2. The value is already known in this block, graph_arr[pos] is set and we
1403 visit the block the first time. We can return the value without
1404 creating any new nodes.
1406 3. The block is mature and we visit it the first time. A Phi node needs
1407 to be created (phi_merge). If the Phi is not needed, as all it's
1408 operands are the same value reaching the block through different
1409 paths, it's optimized away and the value itself is returned.
1411 4. The block is mature, and we visit it the second time. Now two
1412 subcases are possible:
1413 * The value was computed completely the last time we were here. This
1414 is the case if there is no loop. We can return the proper value.
1415 * The recursion that visited this node and set the flag did not
1416 return yet. We are computing a value in a loop and need to
1417 break the recursion without knowing the result yet.
1418 @@@ strange case. Straight forward we would create a Phi before
1419 starting the computation of it's predecessors. In this case we will
1420 find a Phi here in any case. The problem is that this implementation
1421 only creates a Phi after computing the predecessors, so that it is
1422 hard to compute self references of this Phi. @@@
1423 There is no simple check for the second subcase. Therefore we check
1424 for a second visit and treat all such cases as the second subcase.
1425 Anyways, the basic situation is the same: we reached a block
1426 on two paths without finding a definition of the value: No Phi
1427 nodes are needed on both paths.
1428 We return this information "Two paths, no Phi needed" by a very tricky
1429 implementation that relies on the fact that an obstack is a stack and
1430 will return a node with the same address on different allocations.
1431 Look also at phi_merge and new_rd_phi_in to understand this.
1432 @@@ Unfortunately this does not work, see testprogram
1433 three_cfpred_example.
1437 /* case 4 -- already visited. NULL signals the caller (phi_merge) that this
1438 block was reached a second time without finding a definition. */
1438 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1440 /* visited the first time */
1441 set_irn_visited(block, get_irg_visited(current_ir_graph));
1443 /* Get the local valid value */
1444 res = block->attr.block.graph_arr[pos];
1446 /* case 2 -- If the value is actually computed, return it. */
1447 if (res) return res;
1449 if (block->attr.block.matured) { /* case 3 */
1451 /* The Phi has the same amount of ins as the corresponding block. */
1452 int ins = get_irn_arity(block);
1454 NEW_ARR_A (ir_node *, nin, ins);
1456 /* Phi merge collects the predecessors and then creates a node. */
1457 res = phi_merge (block, pos, mode, nin, ins);
1459 } else { /* case 1 */
1460 /* The block is not mature, we don't know how many in's are needed. A Phi
1461 with zero predecessors is created. Such a Phi node is called Phi0
1462 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1463 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1465 The Phi0 has to remember the pos of it's internal value. If the real
1466 Phi is computed, pos is used to update the array with the local
1469 res = new_rd_Phi0 (current_ir_graph, block, mode);
1470 res->attr.phi0_pos = pos;
1471 res->link = block->link;
1475 /* If we get here, the frontend missed a use-before-definition error */
1478 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1479 assert (mode->code >= irm_F && mode->code <= irm_P);
1480 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1481 tarval_mode_null[mode->code]);
1484 /* The local valid value is available now. */
1485 block->attr.block.graph_arr[pos] = res;
1493 it starts the recursion. This causes an Id at the entry of
1494 every block that has no definition of the value! **/
1496 #if USE_EXPLICIT_PHI_IN_STACK
/* Stub implementations of the Phi_in stack interface: this code path does
   not use an explicit stack, so allocation yields NULL and freeing is a
   no-op. They only keep callers of the stack API compiling. */
1498 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1499 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in (phi0-aware variant): build a Phi in `block` over `in`,
   ignoring Bad operands and treating references to the placeholder `phi0`
   as self references. Collapses to the single real operand when possible;
   keeps memory Phis alive via the End node.
   NOTE(review): excerpt with elided lines between the numbered statements. */
1502 static INLINE ir_node *
1503 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1504 ir_node **in, int ins, ir_node *phi0)
1507 ir_node *res, *known;
1509 /* Allocate a new node on the obstack. The allocation copies the in
1511 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1512 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1514 /* This loop checks whether the Phi has more than one predecessor.
1515 If so, it is a real Phi node and we break the loop. Else the
1516 Phi node merges the same definition on several paths and therefore
1517 is not needed. Don't consider Bad nodes! */
1519 for (i=0; i < ins; ++i)
1523 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1525 /* Optimize self referencing Phis: We can't detect them yet properly, as
1526 they still refer to the Phi0 they will replace. So replace right now. */
1527 if (phi0 && in[i] == phi0) in[i] = res;
1529 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1537 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1540 obstack_free (current_ir_graph->obst, res);
1541 if (is_Phi(known)) {
1542 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1543 order, an enclosing Phi node may get superfluous. */
1544 res = optimize_in_place_2(known);
1545 if (res != known) { exchange(known, res); }
1550 /* An undefined value, e.g., in unreachable code. */
1554 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1555 IRN_VRFY_IRG(res, irg);
1556 /* Memory Phis in endless loops must be kept alive.
1557 As we can't distinguish these easily we keep all of them alive. */
1558 if ((res->op == op_Phi) && (mode == mode_M))
1559 add_End_keepalive(irg->end, res);
1566 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1568 #if PRECISE_EXC_CONTEXT
1570 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1572 /* Construct a new frag_array for node n.
1573 Copy the content from the current graph_arr of the corresponding block:
1574 this is the current state.
1575 Set ProjM(n) as current memory state.
1576 Further the last entry in frag_arr of current block points to n. This
1577 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1579 static INLINE ir_node ** new_frag_arr (ir_node *n)
1584 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1585 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1586 sizeof(ir_node *)*current_ir_graph->n_loc);
1588 /* turn off optimization before allocating Proj nodes, as res isn't
1590 opt = get_opt_optimize(); set_optimize(0);
1591 /* Here we rely on the fact that all frag ops have Memory as first result! */
1592 if (get_irn_op(n) == op_Call)
1593 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* NOTE(review): non-Call branch — the assert documents that all these ops
   share the same memory Proj number, so pn_Alloc_M works for every one. */
1595 assert((pn_Quot_M == pn_DivMod_M) &&
1596 (pn_Quot_M == pn_Div_M) &&
1597 (pn_Quot_M == pn_Mod_M) &&
1598 (pn_Quot_M == pn_Load_M) &&
1599 (pn_Quot_M == pn_Store_M) &&
1600 (pn_Quot_M == pn_Alloc_M) );
1601 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the last graph_arr slot of the current block points to the newest
   fragile op. */
1605 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1610 * returns the frag_arr from a node
/* Dispatch on opcode to the attr union member that stores the frag array.
   NOTE(review): the case labels are elided in this excerpt; the accessed
   members suggest Call, Alloc, Load, Store and a default/except case. */
1612 static INLINE ir_node **
1613 get_frag_arr (ir_node *n) {
1614 switch (get_irn_opcode(n)) {
1616 return n->attr.call.frag_arr;   /* Call */
1618 return n->attr.a.frag_arr;      /* Alloc */
1620 return n->attr.load.frag_arr;   /* Load */
1622 return n->attr.store.frag_arr;  /* Store */
1624 return n->attr.except.frag_arr; /* remaining fragile ops */
/* set_frag_value: store `val` at `pos` in `frag_arr` unless already set,
   then propagate along the chain of fragile ops (last graph_arr slot).
   NOTE(review): two alternative bodies are visible here — a recursive one
   and a loop bounded to 1000 iterations; the selecting #if lines are elided
   in this excerpt. */
1629 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1631 if (!frag_arr[pos]) frag_arr[pos] = val;
1632 if (frag_arr[current_ir_graph->n_loc - 1]) {
1633 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1634 assert(arr != frag_arr && "Endless recursion detected");
1635 set_frag_value(arr, pos, val);
/* Iterative variant: walk at most 1000 links of the fragile-op chain. */
1640 for (i = 0; i < 1000; ++i) {
1641 if (!frag_arr[pos]) {
1642 frag_arr[pos] = val;
1644 if (frag_arr[current_ir_graph->n_loc - 1]) {
1645 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1651 assert(0 && "potential endless recursion");
/* get_r_frag_value_internal: look up the value at `pos` as seen right after
   the fragile op `cfOp` (its frag array snapshots the block state), falling
   back to normal SSA lookup / Phi construction when needed. */
1656 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1660 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1662 frag_arr = get_frag_arr(cfOp);
1663 res = frag_arr[pos];
1665 if (block->attr.block.graph_arr[pos]) {
1666 /* There was a set_value after the cfOp and no get_value before that
1667 set_value. We must build a Phi node now. */
1668 if (block->attr.block.matured) {
1669 int ins = get_irn_arity(block);
1671 NEW_ARR_A (ir_node *, nin, ins);
1672 res = phi_merge(block, pos, mode, nin, ins);
/* Block not matured: create a Phi0 placeholder and queue it on the block's
   link list, as in get_r_value_internal case 1. */
1674 res = new_rd_Phi0 (current_ir_graph, block, mode);
1675 res->attr.phi0_pos = pos;
1676 res->link = block->link;
1680 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1681 but this should be better: (remove comment if this works) */
1682 /* It's a Phi, we can write this into all graph_arrs with NULL */
1683 set_frag_value(block->attr.block.graph_arr, pos, res);
1685 res = get_r_value_internal(block, pos, mode);
1686 set_frag_value(block->attr.block.graph_arr, pos, res);
1694 computes the predecessors for the real phi node, and then
1695 allocates and returns this node. The routine called to allocate the
1696 node might optimize it away and return a real value.
1697 This function must be called with an in-array of proper size. **/
/* phi_merge (fast-construction variant): place a Phi0 placeholder first to
   break recursion, collect the operands from all predecessor blocks, then
   build the real Phi and exchange the placeholder for it. */
1699 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1701 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1704 /* If this block has no value at pos create a Phi0 and remember it
1705 in graph_arr to break recursions.
1706 Else we may not set graph_arr as there a later value is remembered. */
1708 if (!block->attr.block.graph_arr[pos]) {
1709 if (block == get_irg_start_block(current_ir_graph)) {
1710 /* Collapsing to Bad tarvals is no good idea.
1711 So we call a user-supplied routine here that deals with this case as
1712 appropriate for the given language. Unfortunately the only help we can give
1713 here is the position.
1715 Even if all variables are defined before use, it can happen that
1716 we get to the start block, if a cond has been replaced by a tuple
1717 (bad, jmp). In this case we call the function needlessly, eventually
1718 generating a non-existent error.
1719 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1722 if (default_initialize_local_variable)
1723 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1725 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1726 /* We don't need to care about exception ops in the start block.
1727 There are none by definition. */
1728 return block->attr.block.graph_arr[pos];
1730 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1731 block->attr.block.graph_arr[pos] = phi0;
1732 #if PRECISE_EXC_CONTEXT
1733 if (get_opt_precise_exc_context()) {
1734 /* Set graph_arr for fragile ops. Also here we should break recursion.
1735 We could choose a cyclic path through a cfop. But the recursion would
1736 break at some point. */
1737 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1743 /* This loop goes to all predecessor blocks of the block the Phi node
1744 is in and there finds the operands of the Phi node by calling
1745 get_r_value_internal. */
1746 for (i = 1; i <= ins; ++i) {
1747 prevCfOp = skip_Proj(block->in[i]);
1749 if (is_Bad(prevCfOp)) {
1750 /* In case a Cond has been optimized we would get right to the start block
1751 with an invalid definition. */
1752 nin[i-1] = new_Bad();
1755 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1757 if (!is_Bad(prevBlock)) {
1758 #if PRECISE_EXC_CONTEXT
1759 if (get_opt_precise_exc_context() &&
1760 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1761 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1762 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1765 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1767 nin[i-1] = new_Bad();
1771 /* We want to pass the Phi0 node to the constructor: this finds additional
1772 optimization possibilities.
1773 The Phi0 node either is allocated in this function, or it comes from
1774 a former call to get_r_value_internal. In this case we may not yet
1775 exchange phi0, as this is done in mature_immBlock. */
1777 phi0_all = block->attr.block.graph_arr[pos];
1778 if (!((get_irn_op(phi0_all) == op_Phi) &&
1779 (get_irn_arity(phi0_all) == 0) &&
1780 (get_nodes_block(phi0_all) == block)))
1786 /* After collecting all predecessors into the array nin a new Phi node
1787 with these predecessors is created. This constructor contains an
1788 optimization: If all predecessors of the Phi node are identical it
1789 returns the only operand instead of a new Phi node. */
1790 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1792 /* In case we allocated a Phi0 node at the beginning of this procedure,
1793 we need to exchange this Phi0 with the real Phi. */
1795 exchange(phi0, res);
1796 block->attr.block.graph_arr[pos] = res;
1797 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1798 only an optimization. */
1804 /* This function returns the last definition of a variable. In case
1805 this variable was last defined in a previous block, Phi nodes are
1806 inserted. If the part of the firm graph containing the definition
1807 is not yet constructed, a dummy Phi node is returned. */
1809 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1812 /* There are 4 cases to treat.
1814 1. The block is not mature and we visit it the first time. We can not
1815 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1816 predecessors is returned. This node is added to the linked list (field
1817 "link") of the containing block to be completed when this block is
1818 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1821 2. The value is already known in this block, graph_arr[pos] is set and we
1822 visit the block the first time. We can return the value without
1823 creating any new nodes.
1825 3. The block is mature and we visit it the first time. A Phi node needs
1826 to be created (phi_merge). If the Phi is not needed, as all it's
1827 operands are the same value reaching the block through different
1828 paths, it's optimized away and the value itself is returned.
1830 4. The block is mature, and we visit it the second time. Now two
1831 subcases are possible:
1832 * The value was computed completely the last time we were here. This
1833 is the case if there is no loop. We can return the proper value.
1834 * The recursion that visited this node and set the flag did not
1835 return yet. We are computing a value in a loop and need to
1836 break the recursion. This case only happens if we visited
1837 the same block with phi_merge before, which inserted a Phi0.
1838 So we return the Phi0.
1841 /* case 4 -- already visited. */
1842 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1843 /* As phi_merge allocates a Phi0 this value is always defined. Here
1844 is the critical difference of the two algorithms. */
1845 assert(block->attr.block.graph_arr[pos]);
1846 return block->attr.block.graph_arr[pos];
1849 /* visited the first time */
1850 set_irn_visited(block, get_irg_visited(current_ir_graph));
1852 /* Get the local valid value */
1853 res = block->attr.block.graph_arr[pos];
1855 /* case 2 -- If the value is actually computed, return it. */
1856 if (res) { return res; };
1858 if (block->attr.block.matured) { /* case 3 */
1860 /* The Phi has the same amount of ins as the corresponding block. */
1861 int ins = get_irn_arity(block);
1863 NEW_ARR_A (ir_node *, nin, ins);
1865 /* Phi merge collects the predecessors and then creates a node. */
1866 res = phi_merge (block, pos, mode, nin, ins);
1868 } else { /* case 1 */
1869 /* The block is not mature, we don't know how many in's are needed. A Phi
1870 with zero predecessors is created. Such a Phi node is called Phi0
1871 node. The Phi0 is then added to the list of Phi0 nodes in this block
1872 to be matured by mature_immBlock later.
1873 The Phi0 has to remember the pos of it's internal value. If the real
1874 Phi is computed, pos is used to update the array with the local
1876 res = new_rd_Phi0 (current_ir_graph, block, mode);
1877 res->attr.phi0_pos = pos;
1878 res->link = block->link;
1882 /* If we get here, the frontend missed a use-before-definition error */
1885 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1886 assert (mode->code >= irm_F && mode->code <= irm_P);
1887 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1888 get_mode_null(mode));
1891 /* The local valid value is available now. */
1892 block->attr.block.graph_arr[pos] = res;
1897 #endif /* USE_FAST_PHI_CONSTRUCTION */
1899 /* ************************************************************************** */
1901 /** Finalize a Block node, when all control flows are known. */
1902 /** Acceptable parameters are only Block nodes. */
/* mature_immBlock: fix the predecessor count of an immature block, upgrade
   all queued Phi0 placeholders (block->link chain) to real Phis via
   phi_merge, mark the block matured and optimize it in place. */
1904 mature_immBlock (ir_node *block)
1911 assert (get_irn_opcode(block) == iro_Block);
1912 /* @@@ should be commented in
1913 assert (!get_Block_matured(block) && "Block already matured"); */
1915 if (!get_Block_matured(block)) {
1916 ins = ARR_LEN (block->in)-1;
1917 /* Fix block parameters */
1918 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1920 /* An array for building the Phi nodes. */
1921 NEW_ARR_A (ir_node *, nin, ins);
1923 /* Traverse a chain of Phi nodes attached to this block and mature
1925 for (n = block->link; n; n=next) {
1926 inc_irg_visited(current_ir_graph);
1928 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1931 block->attr.block.matured = 1;
1933 /* Now, as the block is a finished firm node, we can optimize it.
1934 Since other nodes have been allocated since the block was created
1935 we can not free the node on the obstack. Therefore we have to call
1937 Unfortunately the optimization does not change a lot, as all allocated
1938 nodes refer to the unoptimized node.
1939 We can call _2, as global cse has no effect on blocks. */
1940 block = optimize_in_place_2(block);
1941 IRN_VRFY_IRG(block, current_ir_graph);
/* Debug-info aware constructors: each new_d_X delegates to the raw
   new_rd_X constructor with the current graph and its current block.
   Constants (new_d_Const, new_d_Const_type) go into the start block. */
1946 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1948 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1953 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1955 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1960 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1962 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1968 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1970 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1975 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1977 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* new_d_defaultProj: mark the Cond as fragmentary and create the default
   control flow Proj for it. */
1982 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1985 assert(arg->op == op_Cond);
1986 arg->attr.c.kind = fragmentary;
1987 arg->attr.c.default_proj = max_proj;
1988 res = new_Proj (arg, mode_X, max_proj);
1993 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1995 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2000 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2002 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2006 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2008 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2013 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2015 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2020 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2022 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2028 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2030 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2035 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2037 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2042 * allocate the frag array
/* Attach a frag array to `res` when precise exception context is enabled,
   the graph is still being built, the node was not optimized away to a
   different op, and no array was already set (e.g. by CSE). */
2044 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2045 if (get_opt_precise_exc_context()) {
2046 if ((current_ir_graph->phase_state == phase_building) &&
2047 (get_irn_op(res) == op) && /* Could be optimized away. */
2048 !*frag_store) /* Could be a cse where the arr is already set. */ {
2049 *frag_store = new_frag_arr(res);
/* Division-family constructors (Quot, DivMod, Div, Mod): delegate to the
   raw constructor in the current block, then attach a frag array because
   these are fragile (possibly excepting) operations. */
2056 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2059 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2061 #if PRECISE_EXC_CONTEXT
2062 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2069 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2072 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2074 #if PRECISE_EXC_CONTEXT
2075 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2082 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2085 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2087 #if PRECISE_EXC_CONTEXT
2088 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2095 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2098 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2100 #if PRECISE_EXC_CONTEXT
2101 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Bitwise, shift, comparison and control flow constructors: each delegates
   to the corresponding new_rd_* constructor in the current block. */
2108 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2110 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2115 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2117 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2122 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2124 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2129 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2131 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2136 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2138 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2143 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2145 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2150 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2152 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2157 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2159 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2164 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2166 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2171 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2173 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2178 new_d_Jmp (dbg_info* db)
2180 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2184 new_d_Cond (dbg_info* db, ir_node *c)
2186 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* new_d_Call: build a Call in the current block and attach a frag array
   (Call is a fragile op under precise exception context). */
2190 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2194 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2195 store, callee, arity, in, tp);
2196 #if PRECISE_EXC_CONTEXT
2197 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
/* Memory and control constructors (Return, Raise, Load, Store, Alloc,
   Free): delegate to new_rd_* in the current block; the fragile ops
   (Load, Store, Alloc) additionally get a frag array. */
2204 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2206 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2211 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2213 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2218 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2221 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2223 #if PRECISE_EXC_CONTEXT
2224 allocate_frag_arr(res, op_Load, &res->attr.load.frag_arr); /* Could be optimized away. */
2231 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2234 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2236 #if PRECISE_EXC_CONTEXT
2237 allocate_frag_arr(res, op_Store, &res->attr.store.frag_arr); /* Could be optimized away. */
2244 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2248 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2249 store, size, alloc_type, where);
2250 #if PRECISE_EXC_CONTEXT
2251 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2258 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2260 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2261 store, ptr, size, free_type);
/* Selection and symbolic constant constructors. new_d_simpleSel is the
   zero-index special case of new_d_Sel; SymConsts go into the start block. */
2265 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2266 /* GL: objptr was called frame before. Frame was a bad choice for the name
2267 as the operand could as well be a pointer to a dynamic object. */
2269 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2270 store, objptr, 0, NULL, ent);
2274 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2276 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2277 store, objptr, n_index, index, sel);
2281 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2283 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2284 store, objptr, ent));
2288 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2290 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2295 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2297 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2302 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2304 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* NOTE(review): the enclosing new_d_Bad signature is elided in this excerpt;
   only its delegation to __new_d_Bad() is visible. */
2312 return __new_d_Bad();
/* Confirm/Unknown and interprocedural-view constructors (CallBegin,
   EndReg, EndExcept, Break, Filter, FuncCall): delegate to new_rd_* with
   the current graph and current block. */
2316 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2318 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2323 new_d_Unknown (ir_mode *m)
2325 return new_rd_Unknown(current_ir_graph, m);
2329 new_d_CallBegin (dbg_info *db, ir_node *call)
2332 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2337 new_d_EndReg (dbg_info *db)
2340 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2345 new_d_EndExcept (dbg_info *db)
2348 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2353 new_d_Break (dbg_info *db)
2355 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2359 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2361 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2366 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2370 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2371 callee, arity, in, tp);
2376 /* ********************************************************************* */
2377 /* Comfortable interface with automatic Phi node construction. */
2378 /* (Uses also constructors of the ?? interface, except new_Block.) */
2379 /* ********************************************************************* */
2381 /* * Block construction **/
2382 /* immature Block without predecessors */
/* new_d_immBlock: create an immature Block (dynamic, initially empty
   predecessor list), make it the current block and set up the zeroed
   graph_arr used for automatic Phi construction. */
2383 ir_node *new_d_immBlock (dbg_info* db) {
2386 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2387 /* creates a new dynamic in-array as length of in is -1 */
2388 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2389 current_ir_graph->current_block = res;
2390 res->attr.block.matured = 0;
2391 /* res->attr.block.exc = exc_normal; */
2392 /* res->attr.block.handler_entry = 0; */
2393 res->attr.block.irg = current_ir_graph;
2394 res->attr.block.backedge = NULL;
2395 res->attr.block.in_cg = NULL;
2396 res->attr.block.cg_backedge = NULL;
2397 set_Block_block_visited(res, 0);
2399 /* Create and initialize array for Phi-node construction. */
2400 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2401 current_ir_graph->n_loc);
2402 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2404 /* Immature block may not be optimized! */
2405 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper without debug info. */
2411 new_immBlock (void) {
2412 return new_d_immBlock(NULL);
2415 /* add an edge to a jmp/control flow node */
/* add_immBlock_pred: append control flow predecessor `jmp` to an immature
   block; asserts when the block was already matured. */
2417 add_immBlock_pred (ir_node *block, ir_node *jmp)
2419 if (block->attr.block.matured) {
2420 assert(0 && "Error: Block already matured!\n");
2423 assert(jmp != NULL);
2424 ARR_APP1(ir_node *, block->in, jmp);
2428 /* changing the current block */
2430 set_cur_block (ir_node *target)
2432 current_ir_graph->current_block = target;
2435 /* ************************ */
2436 /* parameter administration */
2438 /* get a value from the parameter array from the current block by its index */
2440 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2442 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2443 inc_irg_visited(current_ir_graph);
2445 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2447 /* get a value from the parameter array from the current block by its index */
2449 get_value (int pos, ir_mode *mode)
2451 return get_d_value(NULL, pos, mode);
2454 /* set a value at position pos in the parameter array from the current block */
2456 set_value (int pos, ir_node *value)
2458 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2459 assert(pos+1 < current_ir_graph->n_loc);
2460 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2463 /* get the current store */
2467 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2468 /* GL: one could call get_value instead */
2469 inc_irg_visited(current_ir_graph);
2470 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2473 /* set the current store */
2475 set_store (ir_node *store)
2477 /* GL: one could call set_value instead */
2478 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2479 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2483 keep_alive (ir_node *ka)
2485 add_End_keepalive(current_ir_graph->end, ka);
2488 /** Useful access routines **/
2489 /* Returns the current block of the current graph. To set the current
2490 block use set_cur_block. */
2491 ir_node *get_cur_block() {
2492 return get_irg_current_block(current_ir_graph);
2495 /* Returns the frame type of the current graph */
2496 type *get_cur_frame_type() {
2497 return get_irg_frame_type(current_ir_graph);
2501 /* ********************************************************************* */
2504 /* call once for each run of the library */
2506 init_cons (default_initialize_local_variable_func_t *func)
2508 default_initialize_local_variable = func;
2511 /* call for each graph */
2513 finalize_cons (ir_graph *irg) {
2514 irg->phase_state = phase_high;
2518 ir_node *new_Block(int arity, ir_node **in) {
2519 return new_d_Block(NULL, arity, in);
2521 ir_node *new_Start (void) {
2522 return new_d_Start(NULL);
2524 ir_node *new_End (void) {
2525 return new_d_End(NULL);
2527 ir_node *new_Jmp (void) {
2528 return new_d_Jmp(NULL);
2530 ir_node *new_Cond (ir_node *c) {
2531 return new_d_Cond(NULL, c);
2533 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2534 return new_d_Return(NULL, store, arity, in);
2536 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2537 return new_d_Raise(NULL, store, obj);
2539 ir_node *new_Const (ir_mode *mode, tarval *con) {
2540 return new_d_Const(NULL, mode, con);
2542 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2543 return new_d_SymConst(NULL, value, kind);
2545 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2546 return new_d_simpleSel(NULL, store, objptr, ent);
2548 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2550 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2552 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2553 return new_d_InstOf (NULL, store, objptr, ent);
2555 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2557 return new_d_Call(NULL, store, callee, arity, in, tp);
2559 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2560 return new_d_Add(NULL, op1, op2, mode);
2562 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2563 return new_d_Sub(NULL, op1, op2, mode);
2565 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2566 return new_d_Minus(NULL, op, mode);
2568 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2569 return new_d_Mul(NULL, op1, op2, mode);
2571 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2572 return new_d_Quot(NULL, memop, op1, op2);
2574 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2575 return new_d_DivMod(NULL, memop, op1, op2);
2577 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2578 return new_d_Div(NULL, memop, op1, op2);
2580 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2581 return new_d_Mod(NULL, memop, op1, op2);
2583 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2584 return new_d_Abs(NULL, op, mode);
2586 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2587 return new_d_And(NULL, op1, op2, mode);
2589 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2590 return new_d_Or(NULL, op1, op2, mode);
2592 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2593 return new_d_Eor(NULL, op1, op2, mode);
2595 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2596 return new_d_Not(NULL, op, mode);
2598 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2599 return new_d_Shl(NULL, op, k, mode);
2601 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2602 return new_d_Shr(NULL, op, k, mode);
2604 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2605 return new_d_Shrs(NULL, op, k, mode);
2607 #define new_Rotate new_Rot
2608 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2609 return new_d_Rot(NULL, op, k, mode);
2611 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2612 return new_d_Cmp(NULL, op1, op2);
2614 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2615 return new_d_Conv(NULL, op, mode);
2617 ir_node *new_Cast (ir_node *op, type *to_tp) {
2618 return new_d_Cast(NULL, op, to_tp);
2620 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2621 return new_d_Phi(NULL, arity, in, mode);
2623 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2624 return new_d_Load(NULL, store, addr, mode);
2626 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2627 return new_d_Store(NULL, store, addr, val);
2629 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2630 where_alloc where) {
2631 return new_d_Alloc(NULL, store, size, alloc_type, where);
2633 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2635 return new_d_Free(NULL, store, ptr, size, free_type);
2637 ir_node *new_Sync (int arity, ir_node **in) {
2638 return new_d_Sync(NULL, arity, in);
2640 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2641 return new_d_Proj(NULL, arg, mode, proj);
2643 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2644 return new_d_defaultProj(NULL, arg, max_proj);
2646 ir_node *new_Tuple (int arity, ir_node **in) {
2647 return new_d_Tuple(NULL, arity, in);
2649 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2650 return new_d_Id(NULL, val, mode);
2652 ir_node *new_Bad (void) {
2655 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2656 return new_d_Confirm (NULL, val, bound, cmp);
2658 ir_node *new_Unknown(ir_mode *m) {
2659 return new_d_Unknown(m);
2661 ir_node *new_CallBegin (ir_node *callee) {
2662 return new_d_CallBegin(NULL, callee);
2664 ir_node *new_EndReg (void) {
2665 return new_d_EndReg(NULL);
2667 ir_node *new_EndExcept (void) {
2668 return new_d_EndExcept(NULL);
2670 ir_node *new_Break (void) {
2671 return new_d_Break(NULL);
2673 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2674 return new_d_Filter(NULL, arg, mode, proj);
2676 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2677 return new_d_FuncCall(NULL, callee, arity, in, tp);