 * File name:   ir/ir/ircons.c
 * Purpose:     Various irnode constructors.  Automatic construction
 *              of SSA representation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier, Boris Boesler
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irprog_t.h"
# include "irgraph_t.h"
# include "irnode_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "firm_common_t.h"
# include "irbackedge_t.h"
# include "irflag_t.h"
# include "iredges_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
   Phi_in.  Redefinition in irgraph.c!! */
typedef struct Phi_in_stack Phi_in_stack;
#endif

/* when we need verifying */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
#endif
/**
 * language dependent initialization variable
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* --------------------------------------------- */
/* private interfaces, for professional use only */
/* --------------------------------------------- */
/* Constructs a Block with a fixed number of predecessors.
   Does not set current_block.  Cannot be used with automatic
   Phi node construction. */
new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)

  res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
  set_Block_matured(res, 1);
  set_Block_block_visited(res, 0);

  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.dead        = 0;
  res->attr.block.irg         = irg;
  res->attr.block.backedge    = new_backedge_arr(irg->obst, arity);
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  res->attr.block.extblk      = NULL;

  IRN_VRFY_IRG(res, irg);
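/* A minimal usage sketch (hypothetical caller code; `jmp_a', `jmp_b',
   `val_a', `val_b' and `irg' are assumed to exist):  when both control
   flow predecessors are already known, a block and a Phi can be built
   directly with the raw constructors defined below, no maturing needed:

     ir_node *cf_in[2]  = { jmp_a, jmp_b };
     ir_node *blk       = new_r_Block(irg, 2, cf_in);
     ir_node *phi_in[2] = { val_a, val_b };
     ir_node *phi       = new_r_Phi(irg, blk, 2, phi_in, mode_Is);
*/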
new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)

  res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = irg; */

  IRN_VRFY_IRG(res, irg);

new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)

  res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);

  IRN_VRFY_IRG(res, irg);
/* Creates a Phi node with all predecessors.  Calling this constructor
   is only allowed if the corresponding block is mature. */
new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)

  bool has_unknown = false;

  /* Don't assert that block matured: the use of this constructor is strongly
     restricted. */
  if ( get_Block_matured(block) )
    assert( get_irn_arity(block) == arity );

  res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);

  res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {
      has_unknown = true;
      break;
    }

  if (!has_unknown) res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(irg->end, res);
new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)

  res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
  res->attr.con.tv = con;
  set_Const_type(res, tp);  /* Call method because of complex assertion. */
  res = optimize_node (res);
  assert(get_Const_type(res) == tp);
  IRN_VRFY_IRG(res, irg);

new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)

  return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);

new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)

  return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
             long proj)

  res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
  res->attr.proj = proj;

  assert(get_Proj_pred(res));
  assert(get_nodes_block(get_Proj_pred(res)));

  res = optimize_node(res);

  IRN_VRFY_IRG(res, irg);
new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
                    long max_proj)

  assert(arg->op == op_Cond);
  arg->attr.c.kind = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)

  assert(is_atomic_type(to_tp));

  res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
  res->attr.cast.totype = to_tp;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)

  res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode)

  res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
              ir_node *op, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
             ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
               ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_And, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
           ir_node *op1, ir_node *op2, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode)

  res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op, ir_node *k, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op, ir_node *k, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
             ir_node *op, ir_node *k, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op, ir_node *k, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2)

  res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)

  res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
  res = optimize_node (res);
  IRN_VRFY_IRG (res, irg);

new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)

  res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
  res->attr.c.kind         = dense;
  res->attr.c.default_proj = 0;
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *callee, int arity, ir_node **in, type *tp)

  NEW_ARR_A(ir_node *, r_in, r_arity);
  r_in[0] = store;
  r_in[1] = callee;
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

  res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);

  assert((get_unknown_type() == tp) || is_Method_type(tp));
  set_Call_type(res, tp);
  res->attr.call.exc.pin_state = op_pin_state_pinned;
  res->attr.call.callee_arr    = NULL;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
               ir_node *store, int arity, ir_node **in)

  NEW_ARR_A (ir_node *, r_in, r_arity);
  r_in[0] = store;
  memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
  res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)

  res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
             ir_node *store, ir_node *adr, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
  res->attr.load.exc.pin_state = op_pin_state_pinned;
  res->attr.load.load_mode     = mode;
  res->attr.load.volatility    = volatility_non_volatile;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
              ir_node *store, ir_node *adr, ir_node *val)

  res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
  res->attr.store.exc.pin_state = op_pin_state_pinned;
  res->attr.store.volatility    = volatility_non_volatile;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
              ir_node *size, type *alloc_type, where_alloc where)

  res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
  res->attr.a.exc.pin_state = op_pin_state_pinned;
  res->attr.a.where         = where;
  res->attr.a.type          = alloc_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *ptr, ir_node *size, type *free_type, where_alloc where)

  res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
  res->attr.f.where = where;
  res->attr.f.type  = free_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
            int arity, ir_node **in, entity *ent)

  assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");

  NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
  r_in[0] = store;
  r_in[1] = objptr;
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
  res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
  res->attr.s.ent = ent;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
               ir_node *objptr, type *ent)

  NEW_ARR_A(ir_node *, r_in, r_arity);
  r_in[0] = store;
  r_in[1] = objptr;

  res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
  res->attr.io.ent = ent;

  /* res = optimize(res); */
  IRN_VRFY_IRG(res, irg);
new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
                      symconst_kind symkind, type *tp) {

  if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
    mode = mode_P_mach;
  else
    mode = mode_Iu;

  res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);

  res->attr.i.num = symkind;
  res->attr.i.sym = value;

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
                 symconst_kind symkind)

  ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);

ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
  symconst_symbol sym = {(type *)symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
}

ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
  symconst_symbol sym = {(type *)symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
}

ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
}

ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
}
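/* The four convenience wrappers above map one-to-one onto the SymConst
   kinds (sketch; `ent', `id' and `ty' are placeholder arguments):

     new_rd_SymConst_addr_ent (db, irg, ent, tp)   -- address of entity ent
     new_rd_SymConst_addr_name(db, irg, id,  tp)   -- address known only by name id
     new_rd_SymConst_type_tag (db, irg, ty,  tp)   -- type tag of type ty
     new_rd_SymConst_size     (db, irg, ty,  tp)   -- size of type ty
*/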
new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)

  res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Bad (ir_graph *irg)

new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)

  ir_node *in[2], *res;

  res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
  res->attr.confirm_cmp = cmp;
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Unknown (ir_graph *irg, ir_mode *m)

  return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)

  in[0] = get_Call_ptr(call);
  res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
  /* res->attr.callbegin.irg = irg; */
  res->attr.callbegin.call = call;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)

  res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);

  IRN_VRFY_IRG(res, irg);

new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)

  res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
  irg->end_except = res;
  IRN_VRFY_IRG (res, irg);

new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)

  res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
               long proj)

  res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
  res->attr.filter.proj     = proj;
  res->attr.filter.in_cg    = NULL;
  res->attr.filter.backedge = NULL;

  assert(get_Proj_pred(res));
  assert(get_nodes_block(get_Proj_pred(res)));

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_NoMem (ir_graph *irg) {

new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
            ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)

  res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
  return new_rd_Block(NULL, irg, arity, in);
}
ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
  return new_rd_Start(NULL, irg, block);
}
ir_node *new_r_End (ir_graph *irg, ir_node *block) {
  return new_rd_End(NULL, irg, block);
}
ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
  return new_rd_Jmp(NULL, irg, block);
}
ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
  return new_rd_Cond(NULL, irg, block, c);
}
ir_node *new_r_Return (ir_graph *irg, ir_node *block,
                       ir_node *store, int arity, ir_node **in) {
  return new_rd_Return(NULL, irg, block, store, arity, in);
}
ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *obj) {
  return new_rd_Raise(NULL, irg, block, store, obj);
}
ir_node *new_r_Const (ir_graph *irg, ir_node *block,
                      ir_mode *mode, tarval *con) {
  return new_rd_Const(NULL, irg, block, mode, con);
}
ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
                          ir_mode *mode, long value) {
  return new_rd_Const_long(NULL, irg, block, mode, value);
}
ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
                          ir_mode *mode, tarval *con, type *tp) {
  return new_rd_Const_type(NULL, irg, block, mode, con, tp);
}
ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
                         symconst_symbol value, symconst_kind symkind) {
  return new_rd_SymConst(NULL, irg, block, value, symkind);
}
ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
                    ir_node *objptr, int n_index, ir_node **index,
                    entity *ent) {
  return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
}
ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
                       type *ent) {
  return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
}
ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
                     ir_node *callee, int arity, ir_node **in,
                     type *tp) {
  return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
}
ir_node *new_r_Add (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Add(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Sub(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
                      ir_node *op, ir_mode *mode) {
  return new_rd_Minus(NULL, irg, block, op, mode);
}
ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Mul(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
                     ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Quot(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
                       ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_Div (ir_graph *irg, ir_node *block,
                    ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Div(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
                    ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Mod(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_mode *mode) {
  return new_rd_Abs(NULL, irg, block, op, mode);
}
ir_node *new_r_And (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Or (ir_graph *irg, ir_node *block,
                   ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Not (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_mode *mode) {
  return new_rd_Not(NULL, irg, block, op, mode);
}
ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2) {
  return new_rd_Cmp(NULL, irg, block, op1, op2);
}
ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
                     ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
                     ir_node *op, ir_mode *mode) {
  return new_rd_Conv(NULL, irg, block, op, mode);
}
ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
  return new_rd_Cast(NULL, irg, block, op, to_tp);
}
ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
                    ir_node **in, ir_mode *mode) {
  return new_rd_Phi(NULL, irg, block, arity, in, mode);
}
ir_node *new_r_Load (ir_graph *irg, ir_node *block,
                     ir_node *store, ir_node *adr, ir_mode *mode) {
  return new_rd_Load(NULL, irg, block, store, adr, mode);
}
ir_node *new_r_Store (ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *adr, ir_node *val) {
  return new_rd_Store(NULL, irg, block, store, adr, val);
}
ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
                      ir_node *size, type *alloc_type, where_alloc where) {
  return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
}
ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
                     ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
  return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
}
ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
  return new_rd_Sync(NULL, irg, block, arity, in);
}
ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
                     ir_mode *mode, long proj) {
  return new_rd_Proj(NULL, irg, block, arg, mode, proj);
}
ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
                            long max_proj) {
  return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
}
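/* Sketch of how defaultProj is meant to be used (hypothetical frontend
   code for a switch with n_cases numbered case exits; new_r_Cond and
   new_r_Proj are the constructors defined above):

     ir_node *cond  = new_r_Cond(irg, blk, selector);
     ir_node *case0 = new_r_Proj(irg, blk, cond, mode_X, 0);
     ...
     ir_node *deflt = new_r_defaultProj(irg, blk, cond, n_cases);

   new_rd_defaultProj marks the Cond as `fragmentary' and records
   max_proj as the number of its default exit. */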
ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
                      int arity, ir_node **in) {
  return new_rd_Tuple(NULL, irg, block, arity, in);
}
ir_node *new_r_Id (ir_graph *irg, ir_node *block,
                   ir_node *val, ir_mode *mode) {
  return new_rd_Id(NULL, irg, block, val, mode);
}
ir_node *new_r_Bad (ir_graph *irg) {
  return new_rd_Bad(irg);
}
ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
}
ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
  return new_rd_Unknown(irg, m);
}
ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
  return new_rd_CallBegin(NULL, irg, block, callee);
}
ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
  return new_rd_EndReg(NULL, irg, block);
}
ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
  return new_rd_EndExcept(NULL, irg, block);
}
ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
  return new_rd_Break(NULL, irg, block);
}
ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
                       ir_mode *mode, long proj) {
  return new_rd_Filter(NULL, irg, block, arg, mode, proj);
}
ir_node *new_r_NoMem (ir_graph *irg) {
  return new_rd_NoMem(irg);
}
ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
                    ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
}
/** ********************/
/** public interfaces  */
/** construction tools */

/**
 *   - create a new Start node in the current block
 *
 *   @return s - pointer to the created Start node
 */
new_d_Start (dbg_info* db)

  res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
                     op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = current_ir_graph; */

  res = optimize_node(res);
  IRN_VRFY_IRG(res, current_ir_graph);

new_d_End (dbg_info* db)

  res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
                    op_End, mode_X, -1, NULL);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs a Block with a fixed number of predecessors.
   Does set current_block.  Can be used with automatic Phi
   node construction. */
new_d_Block (dbg_info* db, int arity, ir_node **in)

  bool has_unknown = false;

  res = new_rd_Block(db, current_ir_graph, arity, in);

  /* Create and initialize array for Phi-node construction. */
  if (get_irg_phase_state(current_ir_graph) == phase_building) {
    res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
                                          current_ir_graph->n_loc);
    memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
  }

  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {
      has_unknown = true;
      break;
    }

  if (!has_unknown) res = optimize_node(res);
  current_ir_graph->current_block = res;

  IRN_VRFY_IRG(res, current_ir_graph);
/* ***********************************************************************/
/* Methods necessary for automatic Phi node creation                     */
/*
  ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
  ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
  ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

  Call Graph:   ( A ---> B == A "calls" B)

       get_value         mature_immBlock
          |                   |
          |                   v
          |              phi_merge
          |              /       \
          v             v         \
       get_r_value_internal        |
          |                        |
          v                        v
      new_rd_Phi0            new_rd_Phi_in

* *************************************************************************** */
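/* A minimal sketch of the intended use (hypothetical frontend code; it
   assumes the public wrappers new_immBlock, add_immBlock_pred, set_value,
   get_value and mature_immBlock declared in ircons.h):

     ir_node *b = new_immBlock();          -- number of predecessors still open
     ...
     add_immBlock_pred(b, jmp_then);
     add_immBlock_pred(b, jmp_else);
     mature_immBlock(b);                   -- phi_merge now completes pending Phi0s
     ir_node *v = get_value(0, mode_Is);   -- may return a freshly built Phi
*/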
/** Creates a Phi node with 0 predecessors */
static INLINE ir_node *
new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)

  res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
  IRN_VRFY_IRG(res, irg);
/* There are two implementations of the Phi node construction.  The first
   is faster, but does not work for blocks with more than 2 predecessors.
   The second always works, but is slower and causes more unnecessary Phi
   nodes.
   Select the implementation with the following preprocessor flag. */
#if USE_FAST_PHI_CONSTRUCTION

/* This is a stack used for allocating and deallocating nodes in
   new_rd_Phi_in.  The original implementation used the obstack
   to model this stack, now it is explicit.  This reduces side effects. */
#if USE_EXPLICIT_PHI_IN_STACK
new_Phi_in_stack(void) {

  res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));

  res->stack = NEW_ARR_F (ir_node *, 0);

free_Phi_in_stack(Phi_in_stack *s) {
  DEL_ARR_F(s->stack);

free_to_Phi_in_stack(ir_node *phi) {
  if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
      current_ir_graph->Phi_in_stack->pos)
    ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
  else
    current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;

  (current_ir_graph->Phi_in_stack->pos)++;
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
                               int arity, ir_node **in) {
  ir_node  *res;
  ir_node **stack = current_ir_graph->Phi_in_stack->stack;
  int       pos   = current_ir_graph->Phi_in_stack->pos;

  if (pos == 0) {
    /* We need to allocate a new node */
    res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
    res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
  } else {
    /* reuse the old node and initialize it again. */
    res = stack[pos-1];

    assert (res->kind == k_ir_node);
    assert (res->op == op_Phi);

    assert (arity >= 0);
    /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
    res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));

    memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

    (current_ir_graph->Phi_in_stack->pos)--;
  }
  return res;
}
#endif /* USE_EXPLICIT_PHI_IN_STACK */
/* Creates a Phi node with a given, fixed array **in of predecessors.
   If the Phi node is unnecessary, as the same value reaches the block
   through all control flow paths, it is eliminated and the value
   returned directly.  This constructor is only intended for use in
   the automatic Phi node generation triggered by get_value or mature.
   The implementation is quite tricky and depends on the fact that
   the nodes are allocated on a stack:
   The in array contains predecessors and NULLs.  The NULLs appear,
   if get_r_value_internal, while computing the predecessors, reached
   the same block on two paths.  In this case the same value reaches
   this block on both paths; there is no definition in between.  We need
   not allocate a Phi where these paths merge, but we have to communicate
   this fact to the caller.  This happens by returning a pointer to the
   node the caller _will_ allocate. (Yes, we predict the address.  We can
   do so because the nodes are allocated on the obstack.)  The caller then
   finds a pointer to itself and, when this routine is called again,
   the self reference signals that no new Phi node is needed. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

  ir_node *res, *known;

  /* Allocate a new node on the obstack.  This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node!  If the in array contains NULLs, there will be
     missing predecessors in the returned node.  Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
#else
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
#endif

  /* The in-array can contain NULLs.  These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path.  The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {
    if (!in[i]) in[i] = res;
  }
  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)
      continue;

    if (known == res)
      known = in[i];
    else
      break;
  }

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
  if (i == ins) {
#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
#else
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);
#endif
    res = known;
  } else {
    res = optimize_node (res);
    IRN_VRFY_IRG(res, irg);
  }

  /* return the pointer to the Phi node.  This node might be deallocated! */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

/** This function computes the predecessors for the real phi node, and then
    allocates and returns this node.  The routine called to allocate the
    node might optimize it away and return a real value, or even a pointer
    to a deallocated Phi node on top of the obstack!
    This function is called with an in-array of proper size. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)

  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
  }
  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node.  If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned.  We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables.  Attention: this might be a pointer to
     a node that will be allocated only later!!!  See new_rd_Phi_in.
     If this is called in mature, after some set_value in the same block,
     the proper value must not be overwritten:
     The call order
       get_value       (makes Phi0, puts it into graph_arr)
       set_value       (overwrites Phi0 in graph_arr)
       mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
                        the proper value) */
  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
  } else {
    /* printf(" value already computed by %s\n",
       get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
  }

  return res;
/* This function returns the last definition of a variable.  In case
   this variable was last defined in a previous block, Phi nodes are
   inserted.  If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case.  Straightforwardly, we would create a Phi before
          starting the computation of its predecessors.  In this case we will
          find a Phi here in any case.  The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi.  @@@
        There is no simple check for the second subcase.  Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyways, the basic situation is the same: we reached a block
        on two paths without finding a definition of the value: No Phi
        nodes are needed on either path.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example.
  */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;
  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else {  /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node. (There is also an obsolete Phi0 opcode.)  The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock
       later.
       The Phi0 has to remember the pos of its internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;
    block->link = res;
  }

  /* If we get here, the frontend missed a use-before-definition error */
  if (!res) {
    printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
    assert (mode->code >= irm_F && mode->code <= irm_P);
    res = new_rd_Const (NULL, current_ir_graph, block, mode,
                        tarval_mode_null[mode->code]);
  }

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

  return res;
   it starts the recursion.  This causes an Id at the entry of
   every block that has no definition of the value! **/

#if USE_EXPLICIT_PHI_IN_STACK
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) { }
#endif
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
               ir_node **in, int ins, ir_node *phi0)

  ir_node *res, *known;

  /* Allocate a new node on the obstack.  The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the
     Phi node merges the same definition on several paths and therefore
     is not needed.  Don't consider Bad nodes! */
  known = res;
  for (i = 0; i < ins; ++i) {
    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis:  We can't detect them yet properly, as
       they still refer to the Phi0 they will replace.  So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i] == res || in[i] == known || is_Bad(in[i])) continue;

    if (known == res)
      known = in[i];
    else
      break;
  }
  /* i==ins: there is at most one predecessor, we don't need a phi node. */
  if (i == ins) {
    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi node may become superfluous. */
      res = optimize_in_place_2(known);
      if (res != known)
        exchange(known, res);
    } else
      res = known;

    if (!res) {
      /* An undefined value, e.g., in unreachable code. */
      res = new_Bad();
    }
  } else {
    res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
    IRN_VRFY_IRG(res, irg);
    /* Memory Phis in endless loops must be kept alive.
       As we can't distinguish these easily we keep all of them alive. */
    if ((res->op == op_Phi) && (mode == mode_M))
      add_End_keepalive(irg->end, res);
  }
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

#if PRECISE_EXC_CONTEXT
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Construct a new frag_array for node n.
   Copy the content from the current graph_arr of the corresponding block:
   this is the current state.
   Set ProjM(n) as current memory state.
   Further the last entry in frag_arr of the current block points to n.  This
   constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
*/
static INLINE ir_node ** new_frag_arr (ir_node *n)

  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
         sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet. */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
  else {
    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  );
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
  }
  set_optimize(opt);

  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
  return arr;
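/* Why frag arrays exist, in one sketch (hypothetical frontend view;
   `store', `addr', `pos' and `val' are placeholders):

     ir_node *ld  = new_d_Load(db, store, addr, mode_Is);
     ir_node *mem = new_Proj(ld, mode_M, pn_Load_M);
     set_value(pos, val);                  -- after the fragile op

   The exception successor of the Load must see the local values as they
   were *before* this set_value.  new_frag_arr snapshots graph_arr at the
   fragile op, and get_r_frag_value_internal reads that snapshot. */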
/**
 * returns the frag_arr from a node
 */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
  switch (get_irn_opcode(n)) {
  case iro_Call:
    return n->attr.call.exc.frag_arr;
  case iro_Alloc:
    return n->attr.a.exc.frag_arr;
  case iro_Load:
    return n->attr.load.exc.frag_arr;
  case iro_Store:
    return n->attr.store.exc.frag_arr;
  default:
    return n->attr.except.frag_arr;
  }
}
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
#if 0
  /* the old recursive variant */
  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);
  }
#else
  int i;

  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;
    }
    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
      frag_arr = arr;
    }
    else
      return;
  }
  assert(0 && "potential endless recursion");
#endif
}
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {

  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];
  if (!res) {
    if (block->attr.block.graph_arr[pos]) {
      /* There was a set_value after the cfOp and no get_value before that
         set_value.  We must build a Phi node now. */
      if (block->attr.block.matured) {
        int ins = get_irn_arity(block);

        NEW_ARR_A (ir_node *, nin, ins);
        res = phi_merge(block, pos, mode, nin, ins);
      } else {
        res = new_rd_Phi0 (current_ir_graph, block, mode);
        res->attr.phi0_pos = pos;
        res->link = block->link;
        block->link = res;
      }

      /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
         but this should be better: (remove comment if this works) */
      /* It's a Phi, we can write this into all graph_arrs with NULL */
      set_frag_value(block->attr.block.graph_arr, pos, res);
    } else {
      res = get_r_value_internal(block, pos, mode);
      set_frag_value(block->attr.block.graph_arr, pos, res);
    }
  }
  return res;
}
/** This function
    computes the predecessors for the real phi node, and then
    allocates and returns this node.  The routine called to allocate the
    node might optimize it away and return a real value.
    This function must be called with an in-array of proper size. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)

  ir_node *prevBlock, *prevCfOp, *res, *phi0 = NULL, *phi0_all;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr as a later value is remembered there. */
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is not a good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language.  Unfortunately the only help we
         can give here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp).  In this case we call the function needlessly, possibly
         generating a nonexistent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are
         intercepted before recursing. */
      if (default_initialize_local_variable)
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
      else
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
    } else {
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context()) {
        /* Set graph_arr for fragile ops.  Also here we should break recursion.
           We could choose a cyclic path through a cfop.  But the recursion would
           break at some point. */
        set_frag_value(block->attr.block.graph_arr, pos, phi0);
      }
#endif
    }
  }
  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);

    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();
      continue;
    }
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
      } else
#endif
        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
    } else {
      nin[i-1] = new_Bad();
    }
  }

  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal.  In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */
  phi0_all = block->attr.block.graph_arr[pos];
  if (!((get_irn_op(phi0_all) == op_Phi) &&
        (get_irn_arity(phi0_all) == 0)   &&
        (get_nodes_block(phi0_all) == block)))
    phi0_all = NULL;
  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
  if (phi0) {
    exchange(phi0, res);
    block->attr.block.graph_arr[pos] = res;
    /* Don't set_frag_value as it does not overwrite.  Doesn't matter, it is
       only an optimization. */
  }

  return res;
/* This function returns the last definition of a variable.  In case
   this variable was last defined in a previous block, Phi nodes are
   inserted.  If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion.  This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0.
  */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined.  Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];
  }

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) { return res; }
  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else {  /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node.  The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of its internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;
    block->link = res;
  }

  /* If we get here, the frontend missed a use-before-definition error */
  if (!res) {
    printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
    assert (mode->code >= irm_F && mode->code <= irm_P);
    res = new_rd_Const (NULL, current_ir_graph, block, mode,
                        get_mode_null(mode));
  }

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

  return res;
#endif /* USE_FAST_PHI_CONSTRUCTION */

/* ************************************************************************** */

/** Finalize a Block node, when all control flows are known. */
/** Acceptable parameters are only Block nodes.              */
mature_immBlock (ir_node *block)

  assert (get_irn_opcode(block) == iro_Block);
  /* @@@ should be commented in
     assert (!get_Block_matured(block) && "Block already matured"); */

  if (!get_Block_matured(block)) {
    ins = ARR_LEN (block->in)-1;
    /* Fix block parameters */
    block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

    /* An array for building the Phi nodes. */
    NEW_ARR_A (ir_node *, nin, ins);

    /* Traverse a chain of Phi nodes attached to this block and mature
       these, too. */
    for (n = block->link; n; n = next) {
      inc_irg_visited(current_ir_graph);
      next = n->link;
      exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
    }

    block->attr.block.matured = 1;

    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we cannot free the node on the obstack.  Therefore we have to call
       optimize_in_place().
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call _2, as global cse has no effect on blocks. */
    block = optimize_in_place_2(block);
    IRN_VRFY_IRG(block, current_ir_graph);
  }
new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)

  return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
                    arity, in, mode);

new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)

  return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
                      mode, con);

new_d_Const_long(dbg_info* db, ir_mode *mode, long value)

  return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);

new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)

  return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
                           mode, con, tp);

new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)

  return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
                   val, mode);

new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)

  return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
                     arg, mode, proj);

new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)

  assert(arg->op == op_Cond);
  arg->attr.c.kind = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_Proj (arg, mode_X, max_proj);

new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)

  return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
                     op, mode);

new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)

  return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);

new_d_Tuple (dbg_info* db, int arity, ir_node **in)

  return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
                      arity, in);

new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)

  return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);

new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)

  return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);

new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)

  return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
                      op, mode);

new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)

  return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
/**
 * allocate the frag array
 */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
  if (get_opt_precise_exc_context()) {
    if ((current_ir_graph->phase_state == phase_building) &&
        (get_irn_op(res) == op)         &&  /* Could be optimized away. */
        !*frag_store)                       /* Could be a cse where the arr is already set. */ {
      *frag_store = new_frag_arr(res);
    }
  }
}
new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
                     memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif

new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
                       memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif

new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
                    memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif

new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)

  res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
                    memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
ir_node *
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
  return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
             op1, op2, mode);
}

ir_node *
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
  return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
            op1, op2, mode);
}

ir_node *
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
  return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
             op1, op2, mode);
}

ir_node *
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
{
  return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
             op, mode);
}

ir_node *
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
  return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
             op, k, mode);
}

ir_node *
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
  return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
             op, k, mode);
}

ir_node *
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
  return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
              op, k, mode);
}

ir_node *
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
  return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
             op, k, mode);
}

ir_node *
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
{
  return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
             op, mode);
}

ir_node *
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
{
  return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
             op1, op2);
}
ir_node *
new_d_Jmp (dbg_info* db)
{
  return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *
new_d_Cond (dbg_info* db, ir_node *c)
{
  return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
}
ir_node *
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
      type *tp)
{
  ir_node *res;
  res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
             store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif

  return res;
}
ir_node *
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
{
  return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
            store, arity, in);
}

ir_node *
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
{
  return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
               store, obj);
}
ir_node *
new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
{
  ir_node *res;
  res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
             store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif

  return res;
}

ir_node *
new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
{
  ir_node *res;
  res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
              store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif

  return res;
}
ir_node *
new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
       where_alloc where)
{
  ir_node *res;
  res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
              store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
#endif

  return res;
}

ir_node *
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
        ir_node *size, type *free_type, where_alloc where)
{
  return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
              store, ptr, size, free_type, where);
}
ir_node *
new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
             store, objptr, 0, NULL, ent);
}

ir_node *
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
{
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
             store, objptr, n_index, index, sel);
}

ir_node *
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
{
  return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
                         store, objptr, ent));
}
ir_node *
new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
{
  return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
                   value, kind, tp);
}

ir_node *
new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
{
  return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
              value, kind);
}

ir_node *
new_d_Sync (dbg_info* db, int arity, ir_node** in)
{
  return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
              arity, in);
}
ir_node *
(new_d_Bad)(void)
{
  /* The parentheses around the name keep a possible function-like macro
     of the same name from expanding here. */
  return _new_d_Bad();
}
ir_node *
new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
{
  return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
             val, bound, cmp);
}

ir_node *
new_d_Unknown (ir_mode *m)
{
  return new_rd_Unknown(current_ir_graph, m);
}
ir_node *
new_d_CallBegin (dbg_info *db, ir_node *call)
{
  ir_node *res;
  res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
  return res;
}

ir_node *
new_d_EndReg (dbg_info *db)
{
  ir_node *res;
  res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *
new_d_EndExcept (dbg_info *db)
{
  ir_node *res;
  res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *
new_d_Break (dbg_info *db)
{
  return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
}
ir_node *
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
{
  return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
            arg, mode, proj);
}

ir_node *
(new_d_NoMem)(void)
{
  /* Parentheses as in new_d_Bad: avoid expanding a same-named macro. */
  return _new_d_NoMem();
}
ir_node *
new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
       ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
             sel, ir_false, ir_true, mode);
}
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Uses also constructors of ?? interface, except new_Block.)           */
/* ********************************************************************* */
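/* Illustrative sketch (not part of the library): typical use of this
   interface.  The control flow node jmp, the variable slots 0 and 1,
   and mode_Is are assumptions of the example; the routines themselves
   are defined below.

     ir_node *b = new_immBlock();      -- new, immature current block
     add_immBlock_pred(b, jmp);        -- add a control flow predecessor
     mature_immBlock(b);               -- all predecessors are known now
     set_value(1, new_Add(get_value(0, mode_Is),
                          get_value(1, mode_Is), mode_Is));
*/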
/** Block construction **/

/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
  ir_node *res;

  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  res->attr.block.dead        = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                         current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}
ir_node *
new_immBlock (void) {
  return new_d_immBlock(NULL);
}
/* add an edge to a jmp/control flow node */
void
add_immBlock_pred (ir_node *block, ir_node *jmp)
{
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}
/* changing the current block */
void
set_cur_block (ir_node *target)
{
  current_ir_graph->current_block = target;
}
/* ************************ */
/* parameter administration */

/* get a value from the parameter array from the current block by its index */
ir_node *
get_d_value (dbg_info* db, int pos, ir_mode *mode)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);

  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}
/* get a value from the parameter array from the current block by its index */
ir_node *
get_value (int pos, ir_mode *mode)
{
  return get_d_value(NULL, pos, mode);
}
/* set a value at position pos in the parameter array from the current block */
void
set_value (int pos, ir_node *value)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}
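/* Illustrative sketch (slot numbers and mode_Is are assumptions of the
   example): translating "a = a + b" with a in slot 0 and b in slot 1.
   get_value transparently inserts Phi nodes if the value arrives from
   several predecessor blocks.

     ir_node *a = get_value(0, mode_Is);
     ir_node *b = get_value(1, mode_Is);
     set_value(0, new_Add(a, b, mode_Is));
*/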
/* get the current store */
ir_node *
get_store (void)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
}
/* set the current store */
void
set_store (ir_node *store)
{
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
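/* Illustrative sketch (addr and mode_Is are assumptions of the example):
   the store must be threaded explicitly through memory operations.  A
   Load consumes the current store and yields the new memory state and
   the loaded value as Projs.

     ir_node *ld  = new_Load(get_store(), addr, mode_Is);
     set_store(new_Proj(ld, mode_M, pn_Load_M));
     ir_node *val = new_Proj(ld, mode_Is, pn_Load_res);
*/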
void
keep_alive (ir_node *ka)
{
  add_End_keepalive(current_ir_graph->end, ka);
}
/** Useful access routines **/
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block() {
  return get_irg_current_block(current_ir_graph);
}
/* Returns the frame type of the current graph */
type *get_cur_frame_type() {
  return get_irg_frame_type(current_ir_graph);
}
/* ********************************************************************* */
/* initialize */

/* call once for each run of the library */
void
init_cons(uninitialized_local_variable_func_t *func)
{
  default_initialize_local_variable = func;
}
/* call for each graph */
void
irg_finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}
void
irp_finalize_cons (void) {
  int i, n_irgs = get_irp_n_irgs();
  for (i = 0; i < n_irgs; i++) {
    irg_finalize_cons(get_irp_irg(i));
  }
  irp->phase_state = phase_high;
}
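/* Illustrative sketch of the construction life cycle using the routines
   above (the build step in the middle is elided):

     init_cons(NULL);        -- once per run; NULL leaves the callback for
                                uninitialized local variables unset
     ...                     -- construct each ir_graph
     irp_finalize_cons();    -- or irg_finalize_cons(irg) per graph
*/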
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}

ir_node *new_Const_long(ir_mode *mode, long value)
{
  return new_d_Const_long(NULL, mode, value);
}

ir_node *new_Const_type(tarval *con, type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}

ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
           type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem (void) {
  return new_d_NoMem();
}
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
}
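/* Illustrative sketch (operands a and b of mode_Is are assumptions of
   the example): Mux selects ir_true when sel is true, so a branchless
   maximum of a and b can be written as

     ir_node *gt  = new_Proj(new_Cmp(a, b), mode_b, pn_Cmp_Gt);
     ir_node *max = new_Mux(gt, b, a, mode_Is);
*/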