3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
/* NOTE(review): this listing appears to be a sampled extraction -- the
   original line numbers are fused into the text and interior lines are
   missing.  Comments below are hedged wherever the missing code would be
   needed to be certain. */
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
/* The two IRN_VRFY_IRG definitions presumably sit in opposite branches of
   a debug/verify #if whose conditional lines are missing here -- TODO
   confirm: one expands to nothing, the other calls irn_vrfy_irg(). */
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Front-end hook for initializing otherwise-uninitialized local variables;
   remains NULL unless assigned elsewhere. */
51 * language dependent initialization variable
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /*** ******************************************** */
56 /** privat interfaces, for professional use only */
58 /* Constructs a Block with a fixed number of predecessors.
59 Does not set current_block. Can not be used with automatic
60 Phi node construction. */
/* new_rd_Block: raw Block constructor (mode_BB).  The result-variable
   declaration, return statement and closing brace are missing from this
   listing. */
62 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
66 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Mark the block mature and reset its visited counter for traversals. */
67 set_Block_matured(res, 1);
68 set_Block_block_visited(res, 0);
70 /* res->attr.block.exc = exc_normal; */
71 /* res->attr.block.handler_entry = 0; */
72 res->attr.block.irg = irg;
/* Fresh back-edge bookkeeping on the graph's obstack; interprocedural
   (call-graph) fields start empty. */
73 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
74 res->attr.block.in_cg = NULL;
75 res->attr.block.cg_backedge = NULL;
77 IRN_VRFY_IRG(res, irg);
/* new_rd_Start: Start node (mode_T, zero predecessors) in `block`. */
82 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
86 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
87 /* res->attr.start.irg = irg; */
89 IRN_VRFY_IRG(res, irg);
/* new_rd_End: End node (mode_X); arity -1 presumably requests a dynamic
   in-array that grows as keep-alive edges are added -- TODO confirm
   against new_ir_node. */
94 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
100 IRN_VRFY_IRG(res, irg);
104 /* Creates a Phi node with all predecessors. Calling this constructor
105 is only allowed if the corresponding block is mature. */
107 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
111 bool has_unknown = false;
113 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's. */
115 if ( get_Block_matured(block) )
116 assert( get_irn_arity(block) == arity );
118 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
120 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the predecessors for Unknown inputs; the body that sets
   has_unknown is missing from this listing but presumably follows. */
122 for (i = arity-1; i >= 0; i--)
123 if (get_irn_op(in[i]) == op_Unknown) {
/* Do not optimize a Phi that still has Unknown inputs. */
128 if (!has_unknown) res = optimize_node (res);
129 IRN_VRFY_IRG(res, irg);
131 /* Memory Phis in endless loops must be kept alive.
132 As we can't distinguish these easily we keep all of them alive. */
133 if ((res->op == op_Phi) && (mode == mode_M))
134 add_End_keepalive(irg->end, res);
/* new_rd_Const_type: constant with an explicit type.  Note: the `block`
   parameter is ignored -- the node is always placed in irg->start_block. */
139 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
143 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
144 res->attr.con.tv = con;
145 set_Const_type(res, tp); /* Call method because of complex assertion. */
146 res = optimize_node (res);
/* Optimization must not have changed the Const's attached type. */
147 assert(get_Const_type(res) == tp);
148 IRN_VRFY_IRG(res, irg);
/* new_rd_Const: convenience wrapper attaching unknown_type. */
154 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
156 type *tp = unknown_type;
157 /* removing this somehow causes errors in jack. */
158 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* new_rd_Id: identity node forwarding `val` in the given mode. */
162 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
166 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
167 res = optimize_node(res);
168 IRN_VRFY_IRG(res, irg);
/* new_rd_Proj: projects one result out of the tuple node `arg`.  The
   parameter line declaring `proj` is missing from this listing. */
173 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
178 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
179 res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
182 assert(get_Proj_pred(res));
183 assert(get_nodes_Block(get_Proj_pred(res)));
185 res = optimize_node(res);
187 IRN_VRFY_IRG(res, irg);
/* new_rd_defaultProj: default jump target of a Cond.  Side effect: the
   Cond itself is mutated -- marked `fragmentary` and max_proj recorded as
   its default projection -- before the mode_X Proj is built. */
193 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
197 assert(arg->op == op_Cond);
198 arg->attr.c.kind = fragmentary;
199 arg->attr.c.default_proj = max_proj;
200 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* new_rd_Conv: mode conversion of `op` to `mode`. */
205 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
209 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
210 res = optimize_node(res);
211 IRN_VRFY_IRG(res, irg);
/* new_rd_Cast: high-level type cast; the operand's mode is kept, only the
   attached type changes. */
216 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
220 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
221 res->attr.cast.totype = to_tp;
222 res = optimize_node(res);
223 IRN_VRFY_IRG(res, irg);
/* new_rd_Tuple: bundles `arity` values into one mode_T node. */
228 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
232 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
233 res = optimize_node (res);
234 IRN_VRFY_IRG(res, irg);
/* Arithmetic, logic, shift and compare constructors.  All share one shape:
   fill a local `in` array with the operands (the declaration and filling
   lines are missing from this listing), build the node, optimize, verify,
   return.  The variants taking a `memop` (Quot/DivMod/Div/Mod) have three
   inputs and produce mode_T results; Cmp also produces mode_T. */
239 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
240 ir_node *op1, ir_node *op2, ir_mode *mode)
247 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
248 res = optimize_node(res);
249 IRN_VRFY_IRG(res, irg);
254 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
255 ir_node *op1, ir_node *op2, ir_mode *mode)
262 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
263 res = optimize_node (res);
264 IRN_VRFY_IRG(res, irg);
269 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
270 ir_node *op, ir_mode *mode)
274 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
275 res = optimize_node(res);
276 IRN_VRFY_IRG(res, irg);
281 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
282 ir_node *op1, ir_node *op2, ir_mode *mode)
289 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
290 res = optimize_node(res);
291 IRN_VRFY_IRG(res, irg);
296 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
297 ir_node *memop, ir_node *op1, ir_node *op2)
305 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
306 res = optimize_node(res);
307 IRN_VRFY_IRG(res, irg);
312 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
313 ir_node *memop, ir_node *op1, ir_node *op2)
321 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
322 res = optimize_node(res);
323 IRN_VRFY_IRG(res, irg);
328 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *memop, ir_node *op1, ir_node *op2)
337 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
338 res = optimize_node(res);
339 IRN_VRFY_IRG(res, irg);
344 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
345 ir_node *memop, ir_node *op1, ir_node *op2)
353 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
354 res = optimize_node(res);
355 IRN_VRFY_IRG(res, irg);
360 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
368 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
369 res = optimize_node(res);
370 IRN_VRFY_IRG(res, irg);
375 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
376 ir_node *op1, ir_node *op2, ir_mode *mode)
383 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
384 res = optimize_node(res);
385 IRN_VRFY_IRG(res, irg);
390 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
391 ir_node *op1, ir_node *op2, ir_mode *mode)
398 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
399 res = optimize_node (res);
400 IRN_VRFY_IRG(res, irg);
405 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
406 ir_node *op, ir_mode *mode)
410 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_node *k, ir_mode *mode)
425 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
433 ir_node *op, ir_node *k, ir_mode *mode)
440 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
447 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
448 ir_node *op, ir_node *k, ir_mode *mode)
455 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
470 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
478 ir_node *op, ir_mode *mode)
482 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
483 res = optimize_node (res);
484 IRN_VRFY_IRG(res, irg);
489 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *op1, ir_node *op2)
497 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
498 res = optimize_node(res);
499 IRN_VRFY_IRG(res, irg);
/* new_rd_Jmp: unconditional branch (mode_X) out of `block`. */
504 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
508 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
509 res = optimize_node (res);
510 IRN_VRFY_IRG (res, irg);
/* new_rd_Cond: conditional branch on `c`; starts as a dense jump table
   with default projection 0 (new_rd_defaultProj may change this later). */
515 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
519 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
520 res->attr.c.kind = dense;
521 res->attr.c.default_proj = 0;
522 res = optimize_node (res);
523 IRN_VRFY_IRG(res, irg);
/* new_rd_Call: call through `callee` with `arity` arguments.  The
   arguments are copied to r_in[2..]; slots 0/1 presumably hold `store`
   and `callee` (those assignment lines are missing from this listing). */
528 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
529 ir_node *callee, int arity, ir_node **in, type *tp)
536 NEW_ARR_A(ir_node *, r_in, r_arity);
539 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
541 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* Calls carry their method type; callee_arr (known-callees cache) empty. */
543 assert(is_method_type(tp));
544 set_Call_type(res, tp);
545 res->attr.call.callee_arr = NULL;
546 res = optimize_node(res);
547 IRN_VRFY_IRG(res, irg);
/* new_rd_Return: return with `arity` result values (mode_X).  The values
   are copied to r_in[1..]; slot 0 presumably holds `store` (that
   assignment line is missing from this listing). */
552 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
553 ir_node *store, int arity, ir_node **in)
560 NEW_ARR_A (ir_node *, r_in, r_arity);
562 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
563 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
/* new_rd_Raise: raise exception object `obj`; two inputs (store, obj) are
   filled into `in` by lines missing from this listing. */
570 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
577 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
578 res = optimize_node(res);
579 IRN_VRFY_IRG(res, irg);
/* new_rd_Load: memory read at address `adr` (inputs store, adr). */
584 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
585 ir_node *store, ir_node *adr)
592 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
/* new_rd_Store: memory write of `val` to `adr` (inputs store, adr, val). */
599 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
600 ir_node *store, ir_node *adr, ir_node *val)
608 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
/* new_rd_Alloc: allocate storage of `alloc_type`; `where` presumably
   selects stack vs. heap allocation -- TODO confirm enum semantics. */
615 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
616 ir_node *size, type *alloc_type, where_alloc where)
623 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
624 res->attr.a.where = where;
625 res->attr.a.type = alloc_type;
626 res = optimize_node(res);
627 IRN_VRFY_IRG(res, irg);
/* new_rd_Free: release the storage at `ptr` of the given size and type. */
632 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
633 ir_node *ptr, ir_node *size, type *free_type)
641 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
642 res->attr.f = free_type;
643 res = optimize_node(res);
644 IRN_VRFY_IRG(res, irg);
/* new_rd_Sel: select entity `ent` relative to `objptr`; the `arity` extra
   inputs are copied to r_in[2..] (array indices); slots 0/1 presumably
   hold store and objptr (those lines are missing from this listing). */
649 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
650 int arity, ir_node **in, entity *ent)
656 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
659 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
662 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
663 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
664 res->attr.s.ent = ent;
665 res = optimize_node(res);
666 IRN_VRFY_IRG(res, irg);
/* new_rd_InstOf: runtime type test.
   NOTE(review): the node is constructed with op_Sel, not op_InstOf --
   this looks like a copy/paste slip; confirm against the opcode list
   before relying on it.  Also note the result is deliberately left
   unoptimized (commented-out call below). */
671 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
672 ir_node *objptr, type *ent)
679 NEW_ARR_A(ir_node *, r_in, r_arity);
683 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
684 res->attr.io.ent = ent;
686 /* res = optimize(res); */
687 IRN_VRFY_IRG(res, irg);
/* new_rd_SymConst_type: symbolic constant (address, type tag, or size).
   Address kinds select a pointer mode; the else branch choosing the mode
   for the remaining kinds is missing from this listing. */
692 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
693 symconst_kind symkind, type *tp)
698 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
702 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
704 res->attr.i.num = symkind;
705 res->attr.i.sym = value;
708 res = optimize_node(res);
709 IRN_VRFY_IRG(res, irg);
/* new_rd_SymConst: wrapper attaching unknown_type. */
714 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
715 symconst_kind symkind)
717 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
/* The four helpers below always place the SymConst in the start block. */
721 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
722 symconst_symbol sym = {(type *)symbol};
723 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
726 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
727 symconst_symbol sym = {(type *)symbol};
728 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
731 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
732 symconst_symbol sym = {symbol};
733 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
736 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
737 symconst_symbol sym = {symbol};
738 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* new_rd_Sync: joins `arity` memory values into one (mode_M). */
742 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
746 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
747 res = optimize_node(res);
748 IRN_VRFY_IRG(res, irg);
/* new_rd_Bad: returns the graph's Bad node (body missing from this
   listing; presumably just irg->bad). */
753 new_rd_Bad (ir_graph *irg)
/* new_rd_Confirm: asserts relation `cmp` between `val` and `bound`; the
   lines filling in[0]/in[1] are missing from this listing. */
759 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
761 ir_node *in[2], *res;
765 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
766 res->attr.confirm_cmp = cmp;
767 res = optimize_node (res);
768 IRN_VRFY_IRG(res, irg);
/* new_rd_Unknown: placeholder value of mode `m`; always placed in the
   start block, and neither optimized nor verified here. */
773 new_rd_Unknown (ir_graph *irg, ir_mode *m)
775 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* new_rd_CallBegin: interprocedural-view start of a call; its single
   input is the call's function pointer. */
779 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
784 in[0] = get_Call_ptr(call);
785 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
786 /* res->attr.callbegin.irg = irg; */
787 res->attr.callbegin.call = call;
788 res = optimize_node(res);
789 IRN_VRFY_IRG(res, irg);
/* new_rd_EndReg / new_rd_EndExcept: interprocedural end nodes with
   dynamic in-arrays (arity -1); EndExcept is also registered on the
   graph. */
794 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
798 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
800 IRN_VRFY_IRG(res, irg);
805 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
809 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
810 irg->end_except = res;
811 IRN_VRFY_IRG (res, irg);
/* new_rd_Break: control-flow break node (mode_X, no predecessors). */
816 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
820 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
/* new_rd_Filter: interprocedural analogue of Proj; call-graph
   bookkeeping fields start empty.  (The `proj` parameter line is missing
   from this listing.) */
827 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
832 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
833 res->attr.filter.proj = proj;
834 res->attr.filter.in_cg = NULL;
835 res->attr.filter.backedge = NULL;
838 assert(get_Proj_pred(res));
839 assert(get_nodes_Block(get_Proj_pred(res)));
841 res = optimize_node(res);
842 IRN_VRFY_IRG(res, irg);
/* new_rd_FuncCall: call without a store input (state-less function):
   arguments are copied to r_in[1..]; slot 0 presumably holds `callee`
   (that assignment line is missing from this listing). */
848 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
849 ir_node *callee, int arity, ir_node **in, type *tp)
856 NEW_ARR_A(ir_node *, r_in, r_arity);
858 memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
860 res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
862 assert(is_method_type(tp));
863 set_FuncCall_type(res, tp);
864 res->attr.call.callee_arr = NULL;
865 res = optimize_node(res);
866 IRN_VRFY_IRG(res, irg);
/* Convenience constructors without debug information: each new_r_* simply
   forwards to the matching new_rd_* passing NULL as the dbg_info.  The
   closing braces of these one-line wrappers (and a few parameter lines)
   are missing from this listing. */
871 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
872 return new_rd_Block(NULL, irg, arity, in);
874 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
875 return new_rd_Start(NULL, irg, block);
877 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
878 return new_rd_End(NULL, irg, block);
880 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
881 return new_rd_Jmp(NULL, irg, block);
883 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
884 return new_rd_Cond(NULL, irg, block, c);
886 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
887 ir_node *store, int arity, ir_node **in) {
888 return new_rd_Return(NULL, irg, block, store, arity, in);
890 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
891 ir_node *store, ir_node *obj) {
892 return new_rd_Raise(NULL, irg, block, store, obj);
894 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
895 ir_mode *mode, tarval *con) {
896 return new_rd_Const(NULL, irg, block, mode, con);
898 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
899 symconst_symbol value, symconst_kind symkind) {
900 return new_rd_SymConst(NULL, irg, block, value, symkind);
902 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
903 ir_node *objptr, int n_index, ir_node **index,
905 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
907 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
909 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
911 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
912 ir_node *callee, int arity, ir_node **in,
914 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
916 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
917 ir_node *op1, ir_node *op2, ir_mode *mode) {
918 return new_rd_Add(NULL, irg, block, op1, op2, mode);
920 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
921 ir_node *op1, ir_node *op2, ir_mode *mode) {
922 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
924 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
925 ir_node *op, ir_mode *mode) {
926 return new_rd_Minus(NULL, irg, block, op, mode);
928 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
929 ir_node *op1, ir_node *op2, ir_mode *mode) {
930 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
932 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
933 ir_node *memop, ir_node *op1, ir_node *op2) {
934 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
936 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
937 ir_node *memop, ir_node *op1, ir_node *op2) {
938 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
940 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
941 ir_node *memop, ir_node *op1, ir_node *op2) {
942 return new_rd_Div(NULL, irg, block, memop, op1, op2);
944 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
945 ir_node *memop, ir_node *op1, ir_node *op2) {
946 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
948 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
949 ir_node *op, ir_mode *mode) {
950 return new_rd_Abs(NULL, irg, block, op, mode);
952 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
953 ir_node *op1, ir_node *op2, ir_mode *mode) {
954 return new_rd_And(NULL, irg, block, op1, op2, mode);
956 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
957 ir_node *op1, ir_node *op2, ir_mode *mode) {
958 return new_rd_Or(NULL, irg, block, op1, op2, mode);
960 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
961 ir_node *op1, ir_node *op2, ir_mode *mode) {
962 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
964 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
965 ir_node *op, ir_mode *mode) {
966 return new_rd_Not(NULL, irg, block, op, mode);
968 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
969 ir_node *op1, ir_node *op2) {
970 return new_rd_Cmp(NULL, irg, block, op1, op2);
972 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
973 ir_node *op, ir_node *k, ir_mode *mode) {
974 return new_rd_Shl(NULL, irg, block, op, k, mode);
976 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
977 ir_node *op, ir_node *k, ir_mode *mode) {
978 return new_rd_Shr(NULL, irg, block, op, k, mode);
980 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
981 ir_node *op, ir_node *k, ir_mode *mode) {
982 return new_rd_Shrs(NULL, irg, block, op, k, mode);
984 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
985 ir_node *op, ir_node *k, ir_mode *mode) {
986 return new_rd_Rot(NULL, irg, block, op, k, mode);
988 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
989 ir_node *op, ir_mode *mode) {
990 return new_rd_Conv(NULL, irg, block, op, mode);
992 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
993 return new_rd_Cast(NULL, irg, block, op, to_tp);
995 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
996 ir_node **in, ir_mode *mode) {
997 return new_rd_Phi(NULL, irg, block, arity, in, mode);
999 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1000 ir_node *store, ir_node *adr) {
1001 return new_rd_Load(NULL, irg, block, store, adr);
1003 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1004 ir_node *store, ir_node *adr, ir_node *val) {
1005 return new_rd_Store(NULL, irg, block, store, adr, val);
1007 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1008 ir_node *size, type *alloc_type, where_alloc where) {
1009 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1011 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1012 ir_node *ptr, ir_node *size, type *free_type) {
1013 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1015 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1016 return new_rd_Sync(NULL, irg, block, arity, in);
1018 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1019 ir_mode *mode, long proj) {
1020 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1022 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1024 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1026 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1027 int arity, ir_node **in) {
1028 return new_rd_Tuple(NULL, irg, block, arity, in );
1030 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1031 ir_node *val, ir_mode *mode) {
1032 return new_rd_Id(NULL, irg, block, val, mode);
1034 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1035 return new_rd_Bad(irg);
1037 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1038 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1040 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1041 return new_rd_Unknown(irg, m);
1043 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1044 return new_rd_CallBegin(NULL, irg, block, callee);
1046 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1047 return new_rd_EndReg(NULL, irg, block);
1049 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1050 return new_rd_EndExcept(NULL, irg, block);
1052 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1053 return new_rd_Break(NULL, irg, block);
1055 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1056 ir_mode *mode, long proj) {
1057 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1059 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1060 ir_node *callee, int arity, ir_node **in,
1062 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1066 /** ********************/
1067 /** public interfaces */
1068 /** construction tools */
1072 * - create a new Start node in the current block
1074 * @return s - pointer to the created Start node
/* new_d_Start: Start node in current_ir_graph's current block. */
1079 new_d_Start (dbg_info* db)
1083 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1084 op_Start, mode_T, 0, NULL);
1085 /* res->attr.start.irg = current_ir_graph; */
1087 res = optimize_node(res);
1088 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: End node in the current block (dynamic in-array, arity -1). */
1093 new_d_End (dbg_info* db)
1096 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1097 op_End, mode_X, -1, NULL);
1098 res = optimize_node(res);
1099 IRN_VRFY_IRG(res, current_ir_graph);
1104 /* Constructs a Block with a fixed number of predecessors.
1105 Does set current_block. Can be used with automatic Phi
1106 node construction. */
1108 new_d_Block (dbg_info* db, int arity, ir_node **in)
1112 bool has_unknown = false;
1114 res = new_rd_Block(db, current_ir_graph, arity, in);
1116 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds, per local-variable slot (n_loc entries), the latest
   definition known in this block; zeroed so missing entries are NULL. */
1117 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1118 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1119 current_ir_graph->n_loc);
1120 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* As in new_rd_Phi: skip optimization while Unknown predecessors remain
   (the loop body setting has_unknown is missing from this listing). */
1123 for (i = arity-1; i >= 0; i--)
1124 if (get_irn_op(in[i]) == op_Unknown) {
1129 if (!has_unknown) res = optimize_node(res);
1130 current_ir_graph->current_block = res;
1132 IRN_VRFY_IRG(res, current_ir_graph);
1137 /* ***********************************************************************/
1138 /* Methods necessary for automatic Phi node creation */
1140 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1141 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1142 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1143 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1145 Call Graph: ( A ---> B == A "calls" B)
1147 get_value mature_immBlock
1155 get_r_value_internal |
1159 new_rd_Phi0 new_rd_Phi_in
1161 * *************************************************************************** */
1163 /** Creates a Phi node with 0 predecessors */
1164 static INLINE ir_node *
/* Placeholder Phi for values referenced before the block is mature; it is
   verified but deliberately not optimized here -- presumably so it keeps
   its identity until the maturing code (outside this listing) fixes it
   up -- TODO confirm. */
1165 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1169 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1170 IRN_VRFY_IRG(res, irg);
1174 /* There are two implementations of the Phi node construction. The first
1175 is faster, but does not work for blocks with more than 2 predecessors.
1176 The second works always but is slower and causes more unnecessary Phi
1178 Select the implementations by the following preprocessor flag set in
1180 #if USE_FAST_PHI_CONSTRUCTION
1182 /* This is a stack used for allocating and deallocating nodes in
1183 new_rd_Phi_in. The original implementation used the obstack
1184 to model this stack, now it is explicit. This reduces side effects.
1186 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate the explicit Phi-in stack: a malloc'd header plus an initially
   empty growable array of node pointers. */
1187 INLINE Phi_in_stack *
1188 new_Phi_in_stack(void) {
1191 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1193 res->stack = NEW_ARR_F (ir_node *, 0);
/* Free the stack's array (freeing of the header is missing from this
   listing, presumably a free(s) follows). */
1200 free_Phi_in_stack(Phi_in_stack *s) {
1201 DEL_ARR_F(s->stack);
/* Push a discarded Phi for later reuse: append when the stack is full up
   to pos, otherwise overwrite the slot at pos, then bump pos. */
1205 free_to_Phi_in_stack(ir_node *phi) {
1206 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1207 current_ir_graph->Phi_in_stack->pos)
1208 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1210 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1212 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a reusable Phi from the stack, or allocate a fresh node when the
   stack is empty (the branching lines are missing from this listing). */
1215 static INLINE ir_node *
1216 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1217 int arity, ir_node **in) {
1219 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1220 int pos = current_ir_graph->Phi_in_stack->pos;
1224 /* We need to allocate a new node */
/* NOTE(review): `db` is not among this function's visible parameters --
   likely this should pass NULL (or take a dbg_info argument); confirm. */
1225 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1226 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1228 /* reuse the old node and initialize it again. */
1231 assert (res->kind == k_ir_node);
1232 assert (res->op == op_Phi);
1236 assert (arity >= 0);
1237 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Rebuild the in-array on the obstack: presumably slot 0 is the block and
   slots 1..arity hold the predecessors (the slot-0 assignment is missing
   from this listing). */
1238 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1240 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
/* A node was popped: shrink the stack. */
1242 (current_ir_graph->Phi_in_stack->pos)--;
1246 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1248 /* Creates a Phi node with a given, fixed array **in of predecessors.
1249 If the Phi node is unnecessary, as the same value reaches the block
1250 through all control flow paths, it is eliminated and the value
1251 returned directly. This constructor is only intended for use in
1252 the automatic Phi node generation triggered by get_value or mature.
1253 The implementation is quite tricky and depends on the fact, that
1254 the nodes are allocated on a stack:
1255 The in array contains predecessors and NULLs. The NULLs appear,
1256 if get_r_value_internal, that computed the predecessors, reached
1257 the same block on two paths. In this case the same value reaches
1258 this block on both paths, there is no definition in between. We need
1259 not allocate a Phi where these path's merge, but we have to communicate
1260 this fact to the caller. This happens by returning a pointer to the
1261 node the caller _will_ allocate. (Yes, we predict the address. We can
1262 do so because the nodes are allocated on the obstack.) The caller then
1263 finds a pointer to itself and, when this routine is called again,
1266 static INLINE ir_node *
1267 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1270 ir_node *res, *known;
1272 /* Allocate a new node on the obstack. This can return a node to
1273 which some of the pointers in the in-array already point.
1274 Attention: the constructor copies the in array, i.e., the later
1275 changes to the array in this routine do not affect the
1276 constructed node! If the in array contains NULLs, there will be
1277 missing predecessors in the returned node. Is this a possible
1278 internal state of the Phi node generation? */
1279 #if USE_EXPLICIT_PHI_IN_STACK
1280 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
/* Non-stack variant: allocate directly on the obstack (the #else line is
   missing from this listing). */
1282 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1283 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1286 /* The in-array can contain NULLs. These were returned by
1287 get_r_value_internal if it reached the same block/definition on a
1288 second path. The NULLs are replaced by the node itself to
1289 simplify the test in the next loop. */
1290 for (i = 0; i < ins; ++i) {
1295 /* This loop checks whether the Phi has more than one predecessor.
1296 If so, it is a real Phi node and we break the loop. Else the Phi
1297 node merges the same definition on several paths and therefore is
1299 for (i = 0; i < ins; ++i)
/* Self-references and repeats of the one known predecessor don't count. */
1301 if (in[i] == res || in[i] == known) continue;
1309 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Degenerate case: give the node back (to the explicit stack or to the
   obstack) and return the single known value instead. */
1311 #if USE_EXPLICIT_PHI_IN_STACK
1312 free_to_Phi_in_stack(res);
1314 obstack_free (current_ir_graph->obst, res);
/* Real Phi: optimize and verify (the else line is missing here). */
1318 res = optimize_node (res);
1319 IRN_VRFY_IRG(res, irg);
1322 /* return the pointer to the Phi node. This node might be deallocated! */
1327 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1330 allocates and returns this node. The routine called to allocate the
1331 node might optimize it away and return a real value, or even a pointer
1332 to a deallocated Phi node on top of the obstack!
1333 This function is called with an in-array of proper size. **/
/* phi_merge (simple construction algorithm): compute the operands of a
   Phi for variable slot `pos` in `block` by querying the value in every
   control-flow predecessor via get_r_value_internal, then build the Phi
   with new_rd_Phi_in.  `nin` is a caller-supplied scratch array of at
   least `ins` entries.
   NOTE(review): this listing elides some original lines (return type,
   braces); do not infer control structure beyond what is shown. */
1335 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1337 ir_node *prevBlock, *res;
1340 /* This loop goes to all predecessor blocks of the block the Phi node is in
1341 and there finds the operands of the Phi node by calling
1342 get_r_value_internal. */
1343 for (i = 1; i <= ins; ++i) {
1344 assert (block->in[i]);
1345 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1347 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1350 /* After collecting all predecessors into the array nin a new Phi node
1351 with these predecessors is created. This constructor contains an
1352 optimization: If all predecessors of the Phi node are identical it
1353 returns the only operand instead of a new Phi node. If the value
1354 passes two different control flow edges without being defined, and
1355 this is the second path treated, a pointer to the node that will be
1356 allocated for the first path (recursion) is returned. We already
1357 know the address of this node, as it is the next node to be allocated
1358 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1359 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1361 /* Now we know the value for "pos" and can enter it in the array with
1362 all known local variables. Attention: this might be a pointer to
1363 a node, that later will be allocated!!! See new_rd_Phi_in.
1364 If this is called in mature, after some set_value in the same block,
1365 the proper value must not be overwritten:
1367 get_value (makes Phi0, put's it into graph_arr)
1368 set_value (overwrites Phi0 in graph_arr)
1369 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record res if no later set_value already stored a value for pos. */
1372 if (!block->attr.block.graph_arr[pos]) {
1373 block->attr.block.graph_arr[pos] = res;
1375 /* printf(" value already computed by %s\n",
1376 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1382 /* This function returns the last definition of a variable. In case
1383 this variable was last defined in a previous block, Phi nodes are
1384 inserted. If the part of the firm graph containing the definition
1385 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): simple-construction variant; some original lines
   (signature return type, braces) are elided in this listing. */
1387 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1390 /* There are 4 cases to treat.
1392 1. The block is not mature and we visit it the first time. We can not
1393 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1394 predecessors is returned. This node is added to the linked list (field
1395 "link") of the containing block to be completed when this block is
1396 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1399 2. The value is already known in this block, graph_arr[pos] is set and we
1400 visit the block the first time. We can return the value without
1401 creating any new nodes.
1403 3. The block is mature and we visit it the first time. A Phi node needs
1404 to be created (phi_merge). If the Phi is not needed, as all it's
1405 operands are the same value reaching the block through different
1406 paths, it's optimized away and the value itself is returned.
1408 4. The block is mature, and we visit it the second time. Now two
1409 subcases are possible:
1410 * The value was computed completely the last time we were here. This
1411 is the case if there is no loop. We can return the proper value.
1412 * The recursion that visited this node and set the flag did not
1413 return yet. We are computing a value in a loop and need to
1414 break the recursion without knowing the result yet.
1415 @@@ strange case. Straight forward we would create a Phi before
1416 starting the computation of it's predecessors. In this case we will
1417 find a Phi here in any case. The problem is that this implementation
1418 only creates a Phi after computing the predecessors, so that it is
1419 hard to compute self references of this Phi. @@@
1420 There is no simple check for the second subcase. Therefore we check
1421 for a second visit and treat all such cases as the second subcase.
1422 Anyways, the basic situation is the same: we reached a block
1423 on two paths without finding a definition of the value: No Phi
1424 nodes are needed on both paths.
1425 We return this information "Two paths, no Phi needed" by a very tricky
1426 implementation that relies on the fact that an obstack is a stack and
1427 will return a node with the same address on different allocations.
1428 Look also at phi_merge and new_rd_phi_in to understand this.
1429 @@@ Unfortunately this does not work, see testprogram
1430 three_cfpred_example.
1434 /* case 4 -- already visited. */
1435 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1437 /* visited the first time */
1438 set_irn_visited(block, get_irg_visited(current_ir_graph));
1440 /* Get the local valid value */
1441 res = block->attr.block.graph_arr[pos];
1443 /* case 2 -- If the value is actually computed, return it. */
1444 if (res) return res;
1446 if (block->attr.block.matured) { /* case 3 */
1448 /* The Phi has the same amount of ins as the corresponding block. */
1449 int ins = get_irn_arity(block);
1451 NEW_ARR_A (ir_node *, nin, ins);
1453 /* Phi merge collects the predecessors and then creates a node. */
1454 res = phi_merge (block, pos, mode, nin, ins);
1456 } else { /* case 1 */
1457 /* The block is not mature, we don't know how many in's are needed. A Phi
1458 with zero predecessors is created. Such a Phi node is called Phi0
1459 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1460 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1462 The Phi0 has to remember the pos of it's internal value. If the real
1463 Phi is computed, pos is used to update the array with the local
1466 res = new_rd_Phi0 (current_ir_graph, block, mode);
1467 res->attr.phi0_pos = pos;
1468 res->link = block->link;
1472 /* If we get here, the frontend missed a use-before-definition error */
1475 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1476 assert (mode->code >= irm_F && mode->code <= irm_P);
1477 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1478 tarval_mode_null[mode->code]);
1481 /* The local valid value is available now. */
1482 block->attr.block.graph_arr[pos] = res;
1490 it starts the recursion. This causes an Id at the entry of
1491 every block that has no definition of the value! **/
1493 #if USE_EXPLICIT_PHI_IN_STACK
/* Stub Phi-in-stack interface for this (non-stack) algorithm variant. */
1495 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1496 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Allocate a Phi for `block` with operands `in` (length `ins`); `phi0`,
   if non-NULL, is the placeholder Phi0 this Phi will later replace so
   self-references via the Phi0 can be folded already here.
   NOTE(review): several original lines of this body are elided. */
1499 static INLINE ir_node *
1500 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1501 ir_node **in, int ins, ir_node *phi0)
1504 ir_node *res, *known;
1506 /* Allocate a new node on the obstack. The allocation copies the in
1508 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1509 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1511 /* This loop checks whether the Phi has more than one predecessor.
1512 If so, it is a real Phi node and we break the loop. Else the
1513 Phi node merges the same definition on several paths and therefore
1514 is not needed. Don't consider Bad nodes! */
1516 for (i=0; i < ins; ++i)
1520 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1522 /* Optimize self referencing Phis: We can't detect them yet properly, as
1523 they still refer to the Phi0 they will replace. So replace right now. */
1524 if (phi0 && in[i] == phi0) in[i] = res;
1526 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1534 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1537 obstack_free (current_ir_graph->obst, res);
1538 if (is_Phi(known)) {
1539 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1540 order, an enclosing Phi node may get superfluous. */
1541 res = optimize_in_place_2(known);
1542 if (res != known) { exchange(known, res); }
1547 /* An undefined value, e.g., in unreachable code. */
1551 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1552 IRN_VRFY_IRG(res, irg);
1553 /* Memory Phis in endless loops must be kept alive.
1554 As we can't distinguish these easily we keep all of them alive. */
1555 if ((res->op == op_Phi) && (mode == mode_M))
1556 add_End_keepalive(irg->end, res);
1563 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1565 #if PRECISE_EXC_CONTEXT
1567 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1569 /* Construct a new frag_array for node n.
1570 Copy the content from the current graph_arr of the corresponding block:
1571 this is the current state.
1572 Set ProjM(n) as current memory state.
1573 Further the last entry in frag_arr of current block points to n. This
1574 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): some lines of this body (braces, local declarations of
   `arr`/`opt`, restoring the optimization flag) are elided in this view. */
1576 static INLINE ir_node ** new_frag_arr (ir_node *n)
1581 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1582 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1583 sizeof(ir_node *)*current_ir_graph->n_loc);
1585 /* turn off optimization before allocating Proj nodes, as res isn't
1587 opt = get_opt_optimize(); set_optimize(0);
1588 /* Here we rely on the fact that all frag ops have Memory as first result! */
1589 if (get_irn_op(n) == op_Call)
1590 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* All other fragile ops share the same memory Proj number, asserted here. */
1592 assert((pn_Quot_M == pn_DivMod_M) &&
1593 (pn_Quot_M == pn_Div_M) &&
1594 (pn_Quot_M == pn_Mod_M) &&
1595 (pn_Quot_M == pn_Load_M) &&
1596 (pn_Quot_M == pn_Store_M) &&
1597 (pn_Quot_M == pn_Alloc_M) );
1598 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain n as the block's last fragile op (slot n_loc-1 of graph_arr). */
1602 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the frag_arr stored in node n's attributes; the attribute slot
   depends on the opcode (Call and Alloc keep it in op-specific attrs). */
1606 static INLINE ir_node **
1607 get_frag_arr (ir_node *n) {
1608 if (get_irn_op(n) == op_Call) {
1609 return n->attr.call.frag_arr;
1610 } else if (get_irn_op(n) == op_Alloc) {
1611 return n->attr.a.frag_arr;
/* default: generic frag_arr attribute (enclosing else elided here). */
1613 return n->attr.frag_arr;
/* Record val at slot pos of frag_arr (only if not already set) and
   propagate along the chain of fragile ops linked through slot n_loc-1.
   NOTE(review): two variants are visible below — a recursive one and a
   bounded loop — presumably selected by elided conditional compilation;
   confirm against the full source. */
1618 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1620 if (!frag_arr[pos]) frag_arr[pos] = val;
1621 if (frag_arr[current_ir_graph->n_loc - 1]) {
1622 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1623 assert(arr != frag_arr && "Endless recursion detected");
1624 set_frag_value(arr, pos, val);
/* Iterative variant: bounded walk along the frag-op chain. */
1629 for (i = 0; i < 1000; ++i) {
1630 if (!frag_arr[pos]) {
1631 frag_arr[pos] = val;
1633 if (frag_arr[current_ir_graph->n_loc - 1]) {
1634 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1640 assert(0 && "potential endless recursion");
/* Look up the value for slot pos as seen after the fragile op cfOp in
   block; falls back to normal Phi construction if a set_value happened
   after cfOp.  NOTE(review): signature/braces partially elided here. */
1645 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1649 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1651 frag_arr = get_frag_arr(cfOp);
1652 res = frag_arr[pos];
1654 if (block->attr.block.graph_arr[pos]) {
1655 /* There was a set_value after the cfOp and no get_value before that
1656 set_value. We must build a Phi node now. */
1657 if (block->attr.block.matured) {
1658 int ins = get_irn_arity(block);
1660 NEW_ARR_A (ir_node *, nin, ins);
1661 res = phi_merge(block, pos, mode, nin, ins);
/* immature block: create a Phi0 placeholder instead (else branch). */
1663 res = new_rd_Phi0 (current_ir_graph, block, mode);
1664 res->attr.phi0_pos = pos;
1665 res->link = block->link;
1669 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1670 but this should be better: (remove comment if this works) */
1671 /* It's a Phi, we can write this into all graph_arrs with NULL */
1672 set_frag_value(block->attr.block.graph_arr, pos, res);
/* Otherwise fall back to the ordinary lookup and cache the result. */
1674 res = get_r_value_internal(block, pos, mode);
1675 set_frag_value(block->attr.block.graph_arr, pos, res);
1683 computes the predecessors for the real phi node, and then
1684 allocates and returns this node. The routine called to allocate the
1685 node might optimize it away and return a real value.
1686 This function must be called with an in-array of proper size. **/
/* phi_merge (default construction algorithm): collect the value of slot
   pos from every predecessor block into nin, then build the Phi.  A Phi0
   placeholder is recorded first to break recursion on cycles.
   NOTE(review): return type, braces and some branches are elided here. */
1688 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1690 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1693 /* If this block has no value at pos create a Phi0 and remember it
1694 in graph_arr to break recursions.
1695 Else we may not set graph_arr as there a later value is remembered. */
1697 if (!block->attr.block.graph_arr[pos]) {
1698 if (block == get_irg_start_block(current_ir_graph)) {
1699 /* Collapsing to Bad tarvals is no good idea.
1700 So we call a user-supplied routine here that deals with this case as
1701 appropriate for the given language. Sadly the only help we can give
1702 here is the position.
1704 Even if all variables are defined before use, it can happen that
1705 we get to the start block, if a cond has been replaced by a tuple
1706 (bad, jmp). In this case we call the function needlessly, eventually
1707 generating a non-existent error.
1708 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1711 if (default_initialize_local_variable)
1712 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1714 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1715 /* We don't need to care about exception ops in the start block.
1716 There are none by definition. */
1717 return block->attr.block.graph_arr[pos];
1719 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1720 block->attr.block.graph_arr[pos] = phi0;
1721 #if PRECISE_EXC_CONTEXT
1722 if (get_opt_precise_exc_context()) {
1723 /* Set graph_arr for fragile ops. Also here we should break recursion.
1724 We could choose a cyclic path through a cfop. But the recursion would
1725 break at some point. */
1726 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1732 /* This loop goes to all predecessor blocks of the block the Phi node
1733 is in and there finds the operands of the Phi node by calling
1734 get_r_value_internal. */
1735 for (i = 1; i <= ins; ++i) {
1736 prevCfOp = skip_Proj(block->in[i]);
1738 if (is_Bad(prevCfOp)) {
1739 /* In case a Cond has been optimized we would get right to the start block
1740 with an invalid definition. */
1741 nin[i-1] = new_Bad();
1744 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1746 if (!is_Bad(prevBlock)) {
1747 #if PRECISE_EXC_CONTEXT
1748 if (get_opt_precise_exc_context() &&
1749 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1750 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1751 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1754 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1756 nin[i-1] = new_Bad();
1760 /* We want to pass the Phi0 node to the constructor: this finds additional
1761 optimization possibilities.
1762 The Phi0 node either is allocated in this function, or it comes from
1763 a former call to get_r_value_internal. In this case we may not yet
1764 exchange phi0, as this is done in mature_immBlock. */
1766 phi0_all = block->attr.block.graph_arr[pos];
1767 if (!((get_irn_op(phi0_all) == op_Phi) &&
1768 (get_irn_arity(phi0_all) == 0) &&
1769 (get_nodes_block(phi0_all) == block)))
1775 /* After collecting all predecessors into the array nin a new Phi node
1776 with these predecessors is created. This constructor contains an
1777 optimization: If all predecessors of the Phi node are identical it
1778 returns the only operand instead of a new Phi node. */
1779 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1781 /* In case we allocated a Phi0 node at the beginning of this procedure,
1782 we need to exchange this Phi0 with the real Phi. */
1784 exchange(phi0, res);
1785 block->attr.block.graph_arr[pos] = res;
1786 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1787 only an optimization. */
1793 /* This function returns the last definition of a variable. In case
1794 this variable was last defined in a previous block, Phi nodes are
1795 inserted. If the part of the firm graph containing the definition
1796 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): default-construction variant; signature return type and
   braces are elided in this listing. */
1798 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1801 /* There are 4 cases to treat.
1803 1. The block is not mature and we visit it the first time. We can not
1804 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1805 predecessors is returned. This node is added to the linked list (field
1806 "link") of the containing block to be completed when this block is
1807 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1810 2. The value is already known in this block, graph_arr[pos] is set and we
1811 visit the block the first time. We can return the value without
1812 creating any new nodes.
1814 3. The block is mature and we visit it the first time. A Phi node needs
1815 to be created (phi_merge). If the Phi is not needed, as all it's
1816 operands are the same value reaching the block through different
1817 paths, it's optimized away and the value itself is returned.
1819 4. The block is mature, and we visit it the second time. Now two
1820 subcases are possible:
1821 * The value was computed completely the last time we were here. This
1822 is the case if there is no loop. We can return the proper value.
1823 * The recursion that visited this node and set the flag did not
1824 return yet. We are computing a value in a loop and need to
1825 break the recursion. This case only happens if we visited
1826 the same block with phi_merge before, which inserted a Phi0.
1827 So we return the Phi0.
1830 /* case 4 -- already visited. */
1831 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1832 /* As phi_merge allocates a Phi0 this value is always defined. Here
1833 is the critical difference of the two algorithms. */
1834 assert(block->attr.block.graph_arr[pos]);
1835 return block->attr.block.graph_arr[pos];
1838 /* visited the first time */
1839 set_irn_visited(block, get_irg_visited(current_ir_graph));
1841 /* Get the local valid value */
1842 res = block->attr.block.graph_arr[pos];
1844 /* case 2 -- If the value is actually computed, return it. */
1845 if (res) { return res; };
1847 if (block->attr.block.matured) { /* case 3 */
1849 /* The Phi has the same amount of ins as the corresponding block. */
1850 int ins = get_irn_arity(block);
1852 NEW_ARR_A (ir_node *, nin, ins);
1854 /* Phi merge collects the predecessors and then creates a node. */
1855 res = phi_merge (block, pos, mode, nin, ins);
1857 } else { /* case 1 */
1858 /* The block is not mature, we don't know how many in's are needed. A Phi
1859 with zero predecessors is created. Such a Phi node is called Phi0
1860 node. The Phi0 is then added to the list of Phi0 nodes in this block
1861 to be matured by mature_immBlock later.
1862 The Phi0 has to remember the pos of it's internal value. If the real
1863 Phi is computed, pos is used to update the array with the local
1865 res = new_rd_Phi0 (current_ir_graph, block, mode);
1866 res->attr.phi0_pos = pos;
1867 res->link = block->link;
1871 /* If we get here, the frontend missed a use-before-definition error */
1874 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1875 assert (mode->code >= irm_F && mode->code <= irm_P);
1876 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1877 get_mode_null(mode));
1880 /* The local valid value is available now. */
1881 block->attr.block.graph_arr[pos] = res;
1886 #endif /* USE_FAST_PHI_CONSTRUCTION */
1888 /* ************************************************************************** */
1890 /** Finalize a Block node, when all control flows are known. */
1891 /** Acceptable parameters are only Block nodes. */
/* NOTE(review): return type, braces and local declarations (ins, nin, n,
   next) are elided in this listing. */
1893 mature_immBlock (ir_node *block)
1900 assert (get_irn_opcode(block) == iro_Block);
1901 /* @@@ should be commented in
1902 assert (!get_Block_matured(block) && "Block already matured"); */
1904 if (!get_Block_matured(block)) {
1905 ins = ARR_LEN (block->in)-1;
1906 /* Fix block parameters */
1907 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1909 /* An array for building the Phi nodes. */
1910 NEW_ARR_A (ir_node *, nin, ins);
1912 /* Traverse a chain of Phi nodes attached to this block and mature
1914 for (n = block->link; n; n=next) {
1915 inc_irg_visited(current_ir_graph);
/* Replace each pending Phi0 by the real Phi built from all preds. */
1917 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1920 block->attr.block.matured = 1;
1922 /* Now, as the block is a finished firm node, we can optimize it.
1923 Since other nodes have been allocated since the block was created
1924 we can not free the node on the obstack. Therefore we have to call
1926 Unfortunately the optimization does not change a lot, as all allocated
1927 nodes refer to the unoptimized node.
1928 We can call _2, as global cse has no effect on blocks. */
1929 block = optimize_in_place_2(block);
1930 IRN_VRFY_IRG(block, current_ir_graph);
/* new_d_* constructors: thin wrappers that forward to the corresponding
   new_rd_* constructor in the current block (Const/SymConst use the
   start block).  NOTE(review): return types, braces and argument
   continuation lines are elided in this listing. */
1935 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1937 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1942 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1944 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1949 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1951 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1957 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1959 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1964 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1966 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* defaultProj marks the Cond as fragmentary and records its default
   projection number before creating the Proj. */
1971 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1974 assert(arg->op == op_Cond);
1975 arg->attr.c.kind = fragmentary;
1976 arg->attr.c.default_proj = max_proj;
1977 res = new_Proj (arg, mode_X, max_proj);
1982 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1984 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1989 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1991 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1995 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1997 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2002 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2004 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2009 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2011 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2019 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2024 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2026 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2031 * allocate the frag array
/* Allocate a frag array for res (only while building, only if res still
   has opcode `op` and no array was set yet, e.g. by cse). */
2033 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2034 if (get_opt_precise_exc_context()) {
2035 if ((current_ir_graph->phase_state == phase_building) &&
2036 (get_irn_op(res) == op) && /* Could be optimized away. */
2037 !*frag_store) /* Could be a cse where the arr is already set. */ {
2038 *frag_store = new_frag_arr(res);
/* Division-family constructors: forward to new_rd_* and, under
   PRECISE_EXC_CONTEXT, allocate a frag array for the fragile op.
   NOTE(review): return types, braces and #endif lines are elided. */
2045 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2048 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2050 #if PRECISE_EXC_CONTEXT
2051 allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
2058 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2061 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2063 #if PRECISE_EXC_CONTEXT
2064 allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
2071 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2074 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2076 #if PRECISE_EXC_CONTEXT
2077 allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
2084 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2087 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2089 #if PRECISE_EXC_CONTEXT
2090 allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
/* Bitwise/shift/compare/control-flow constructors: plain forwards to the
   corresponding new_rd_* in the current block.
   NOTE(review): return types and argument continuation lines are elided. */
2097 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2099 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2104 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2106 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2111 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2113 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2118 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2120 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2125 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2127 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2132 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2134 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2139 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2141 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2146 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2148 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2153 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2155 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2160 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2162 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2167 new_d_Jmp (dbg_info* db)
2169 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2173 new_d_Cond (dbg_info* db, ir_node *c)
2175 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory-related constructors; the fragile ones (Call, Load, Store,
   Alloc) also allocate a frag array under PRECISE_EXC_CONTEXT.
   NOTE(review): return types, braces and #endif lines are elided. */
2179 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2183 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2184 store, callee, arity, in, tp);
2185 #if PRECISE_EXC_CONTEXT
2186 allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
2193 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2195 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2200 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2202 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2207 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2210 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2212 #if PRECISE_EXC_CONTEXT
2213 allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
2220 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2223 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2225 #if PRECISE_EXC_CONTEXT
2226 allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
2233 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2237 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2238 store, size, alloc_type, where);
2239 #if PRECISE_EXC_CONTEXT
2240 allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
2247 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2249 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2250 store, ptr, size, free_type);
/* Selection and symbolic-constant constructors.  simpleSel is a Sel
   with no index operands; SymConst variants are placed in the start
   block.  NOTE(review): return types, braces and continuation lines are
   elided in this listing. */
2254 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2255 /* GL: objptr was called frame before. Frame was a bad choice for the name
2256 as the operand could as well be a pointer to a dynamic object. */
2258 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2259 store, objptr, 0, NULL, ent);
2263 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2265 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2266 store, objptr, n_index, index, sel);
2270 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2272 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2273 store, objptr, ent));
2277 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2279 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2284 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2286 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2291 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2293 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* new_d_Bad body (its signature line is elided in this listing). */
2301 return __new_d_Bad();
/* Confirm/Unknown and interprocedural-view constructors (CallBegin,
   EndReg, EndExcept, Break, Filter, FuncCall); all forward to new_rd_*.
   NOTE(review): return types, braces and continuation lines are elided. */
2305 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2307 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2312 new_d_Unknown (ir_mode *m)
2314 return new_rd_Unknown(current_ir_graph, m);
2318 new_d_CallBegin (dbg_info *db, ir_node *call)
2321 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2326 new_d_EndReg (dbg_info *db)
2329 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2334 new_d_EndExcept (dbg_info *db)
2337 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2342 new_d_Break (dbg_info *db)
2344 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2348 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2350 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2355 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2359 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2360 callee, arity, in, tp);
2365 /* ********************************************************************* */
2366 /* Comfortable interface with automatic Phi node construction. */
2367 /* (Uses also constructors of ?? interface, except new_Block.) */
2368 /* ********************************************************************* */
2370 /* * Block construction **/
2371 /* immature Block without predecessors */
/* Create an immature block (dynamic in-array, no predecessors yet) and
   make it the current block.  NOTE(review): the declaration of `res` and
   the trailing `return res;`/closing brace are elided in this listing. */
2372 ir_node *new_d_immBlock (dbg_info* db) {
2375 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2376 /* creates a new dynamic in-array as length of in is -1 */
2377 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2378 current_ir_graph->current_block = res;
2379 res->attr.block.matured = 0;
2380 /* res->attr.block.exc = exc_normal; */
2381 /* res->attr.block.handler_entry = 0; */
2382 res->attr.block.irg = current_ir_graph;
2383 res->attr.block.backedge = NULL;
2384 res->attr.block.in_cg = NULL;
2385 res->attr.block.cg_backedge = NULL;
2386 set_Block_block_visited(res, 0);
2388 /* Create and initialize array for Phi-node construction. */
2389 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2390 current_ir_graph->n_loc);
2391 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2393 /* Immature block may not be optimized! */
2394 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper: immature block without debug info. */
2400 new_immBlock (void) {
2401 return new_d_immBlock(NULL);
2404 /* add an edge to a jmp/control flow node */
/* Append control-flow predecessor jmp to an immature block; asserts if
   the block was already matured. */
2406 add_immBlock_pred (ir_node *block, ir_node *jmp)
2408 if (block->attr.block.matured) {
2409 assert(0 && "Error: Block already matured!\n");
2412 assert(jmp != NULL);
2413 ARR_APP1(ir_node *, block->in, jmp);
2417 /* changing the current block */
/* Make `target` the current block of the current graph. */
2419 set_cur_block (ir_node *target)
2421 current_ir_graph->current_block = target;
2424 /* ************************ */
2425 /* parameter administration */
2427 /* get a value from the parameter array from the current block by its index */
/* Note: user position `pos` maps to graph_arr slot pos+1; slot 0 holds
   the memory state (see get_store/set_store). */
2429 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2431 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2432 inc_irg_visited(current_ir_graph);
2434 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2436 /* get a value from the parameter array from the current block by its index */
2438 get_value (int pos, ir_mode *mode)
2440 return get_d_value(NULL, pos, mode);
2443 /* set a value at position pos in the parameter array from the current block */
2445 set_value (int pos, ir_node *value)
2447 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2448 assert(pos+1 < current_ir_graph->n_loc);
2449 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2452 /* get the current store */
2456 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2457 /* GL: one could call get_value instead */
2458 inc_irg_visited(current_ir_graph);
2459 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2462 /* set the current store */
2464 set_store (ir_node *store)
2466 /* GL: one could call set_value instead */
2467 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2468 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2472 keep_alive (ir_node *ka)
2474 add_End_keepalive(current_ir_graph->end, ka);
2477 /** Useful access routines **/
2478 /* Returns the current block of the current graph. To set the current
2479 block use set_cur_block. */
2480 ir_node *get_cur_block() {
2481 return get_irg_current_block(current_ir_graph);
2484 /* Returns the frame type of the current graph */
2485 type *get_cur_frame_type() {
2486 return get_irg_frame_type(current_ir_graph);
2490 /* ********************************************************************* */
2493 /* call once for each run of the library */
2495 init_cons (default_initialize_local_variable_func_t *func)
2497 default_initialize_local_variable = func;
2500 /* call for each graph */
2502 finalize_cons (ir_graph *irg) {
2503 irg->phase_state = phase_high;
2507 ir_node *new_Block(int arity, ir_node **in) {
2508 return new_d_Block(NULL, arity, in);
2510 ir_node *new_Start (void) {
2511 return new_d_Start(NULL);
2513 ir_node *new_End (void) {
2514 return new_d_End(NULL);
2516 ir_node *new_Jmp (void) {
2517 return new_d_Jmp(NULL);
2519 ir_node *new_Cond (ir_node *c) {
2520 return new_d_Cond(NULL, c);
2522 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2523 return new_d_Return(NULL, store, arity, in);
2525 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2526 return new_d_Raise(NULL, store, obj);
2528 ir_node *new_Const (ir_mode *mode, tarval *con) {
2529 return new_d_Const(NULL, mode, con);
2531 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2532 return new_d_SymConst(NULL, value, kind);
2534 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2535 return new_d_simpleSel(NULL, store, objptr, ent);
2537 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2539 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2541 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2542 return new_d_InstOf (NULL, store, objptr, ent);
2544 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2546 return new_d_Call(NULL, store, callee, arity, in, tp);
2548 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2549 return new_d_Add(NULL, op1, op2, mode);
2551 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2552 return new_d_Sub(NULL, op1, op2, mode);
2554 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2555 return new_d_Minus(NULL, op, mode);
2557 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2558 return new_d_Mul(NULL, op1, op2, mode);
2560 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2561 return new_d_Quot(NULL, memop, op1, op2);
2563 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2564 return new_d_DivMod(NULL, memop, op1, op2);
2566 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2567 return new_d_Div(NULL, memop, op1, op2);
2569 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2570 return new_d_Mod(NULL, memop, op1, op2);
2572 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2573 return new_d_Abs(NULL, op, mode);
2575 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2576 return new_d_And(NULL, op1, op2, mode);
2578 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2579 return new_d_Or(NULL, op1, op2, mode);
2581 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2582 return new_d_Eor(NULL, op1, op2, mode);
2584 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2585 return new_d_Not(NULL, op, mode);
2587 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2588 return new_d_Shl(NULL, op, k, mode);
2590 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2591 return new_d_Shr(NULL, op, k, mode);
2593 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2594 return new_d_Shrs(NULL, op, k, mode);
2596 #define new_Rotate new_Rot
2597 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2598 return new_d_Rot(NULL, op, k, mode);
2600 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2601 return new_d_Cmp(NULL, op1, op2);
2603 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2604 return new_d_Conv(NULL, op, mode);
2606 ir_node *new_Cast (ir_node *op, type *to_tp) {
2607 return new_d_Cast(NULL, op, to_tp);
2609 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2610 return new_d_Phi(NULL, arity, in, mode);
2612 ir_node *new_Load (ir_node *store, ir_node *addr) {
2613 return new_d_Load(NULL, store, addr);
2615 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2616 return new_d_Store(NULL, store, addr, val);
2618 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2619 where_alloc where) {
2620 return new_d_Alloc(NULL, store, size, alloc_type, where);
2622 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2624 return new_d_Free(NULL, store, ptr, size, free_type);
2626 ir_node *new_Sync (int arity, ir_node **in) {
2627 return new_d_Sync(NULL, arity, in);
2629 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2630 return new_d_Proj(NULL, arg, mode, proj);
2632 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2633 return new_d_defaultProj(NULL, arg, max_proj);
2635 ir_node *new_Tuple (int arity, ir_node **in) {
2636 return new_d_Tuple(NULL, arity, in);
2638 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2639 return new_d_Id(NULL, val, mode);
2641 ir_node *new_Bad (void) {
2644 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2645 return new_d_Confirm (NULL, val, bound, cmp);
2647 ir_node *new_Unknown(ir_mode *m) {
2648 return new_d_Unknown(m);
2650 ir_node *new_CallBegin (ir_node *callee) {
2651 return new_d_CallBegin(NULL, callee);
2653 ir_node *new_EndReg (void) {
2654 return new_d_EndReg(NULL);
2656 ir_node *new_EndExcept (void) {
2657 return new_d_EndExcept(NULL);
2659 ir_node *new_Break (void) {
2660 return new_d_Break(NULL);
2662 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2663 return new_d_Filter(NULL, arg, mode, proj);
2665 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2666 return new_d_FuncCall(NULL, callee, arity, in, tp);