3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irgraph_t.h"
29 # include "irnode_t.h"
30 # include "irmode_t.h"
31 # include "ircons_t.h"
32 # include "firm_common_t.h"
38 # include "irbackedge_t.h"
39 # include "irflag_t.h"
40 # include "iredges_t.h"
42 #if USE_EXPLICIT_PHI_IN_STACK
43 /* A stack needed for the automatic Phi node construction in constructor
44 Phi_in. Redefinition in irgraph.c!! */
49 typedef struct Phi_in_stack Phi_in_stack;
52 /* when we need verifying */
/* NOTE(review): the empty and the checking definition of IRN_VRFY_IRG below
   presumably sit in an #ifdef NDEBUG / #else pair whose lines are not visible
   in this excerpt -- confirm against the full file. */
54 # define IRN_VRFY_IRG(res, irg)
56 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Front-end supplied callback used to produce an initial value for otherwise
   uninitialized local variables; stays NULL until a front end installs one. */
60 * language dependent initialization variable
62 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
64 /* -------------------------------------------- */
65 /* private interfaces, for professional use only */
66 /* -------------------------------------------- */
68 /* Constructs a Block with a fixed number of predecessors.
69 Does not set current_block. Can not be used with automatic
70 Phi node construction. */
72 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block has no enclosing block of its own, hence the NULL block argument. */
76 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
77 set_Block_matured(res, 1);
78 set_Block_block_visited(res, 0);
80 /* res->attr.block.exc = exc_normal; */
81 /* res->attr.block.handler_entry = 0; */
82 res->attr.block.dead = 0;
83 res->attr.block.irg = irg;
/* Backedge flags live on the graph's obstack, one slot per predecessor. */
84 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call-graph) predecessor info is filled in later, if at all. */
85 res->attr.block.in_cg = NULL;
86 res->attr.block.cg_backedge = NULL;
88 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node: a mode_T tuple with no predecessors. */
93 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
97 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
98 /* res->attr.start.irg = irg; */
100 IRN_VRFY_IRG(res, irg);
/* Constructs an End node. NOTE(review): arity -1 presumably requests a
   dynamically growing predecessor array (keep-alives added later) -- confirm
   against the new_ir_node implementation. */
105 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
109 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
111 IRN_VRFY_IRG(res, irg);
115 /* Creates a Phi node with all predecessors. Calling this constructor
116 is only allowed if the corresponding block is mature. */
118 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
122 bool has_unknown = false;
124 /* Don't assert that block matured: the use of this constructor is strongly
/* Consistency check: a mature block must have exactly 'arity' predecessors. */
126 if ( get_Block_matured(block) )
127 assert( get_irn_arity(block) == arity );
129 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
131 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; the loop body (elided in this
   excerpt) presumably sets has_unknown -- confirm. */
133 for (i = arity-1; i >= 0; i--)
134 if (get_irn_op(in[i]) == op_Unknown) {
/* Phis with Unknown inputs are deliberately not optimized away here. */
139 if (!has_unknown) res = optimize_node (res);
140 IRN_VRFY_IRG(res, irg);
142 /* Memory Phis in endless loops must be kept alive.
143 As we can't distinguish these easily we keep all of them alive. */
144 if ((res->op == op_Phi) && (mode == mode_M))
145 add_End_keepalive(irg->end, res);
/* Constructs a typed Const node for tarval 'con'. Note: the 'block'
   parameter is ignored -- constants are always placed in the graph's
   start block (see the irg->start_block argument below). */
150 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
154 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
155 res->attr.con.tv = con;
156 set_Const_type(res, tp); /* Call method because of complex assertion. */
157 res = optimize_node (res);
158 assert(get_Const_type(res) == tp);
159 IRN_VRFY_IRG(res, irg);
/* Convenience: Const with the default (unknown) type. */
165 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
167 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience: Const from a host 'long', converted to a tarval of 'mode'. */
171 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
173 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Constructs an Id node: forwards 'val' unchanged (usually optimized away). */
177 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
181 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
182 res = optimize_node(res);
183 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting result 'proj' from the tuple 'arg'. */
188 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
193 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
194 res->attr.proj = proj;
197 assert(get_Proj_pred(res));
198 assert(get_nodes_block(get_Proj_pred(res)));
200 res = optimize_node(res);
202 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond and marks the Cond as 'fragmentary'
   with 'max_proj' as its default projection number. */
208 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
212 assert(arg->op == op_Cond);
213 arg->attr.c.kind = fragmentary;
214 arg->attr.c.default_proj = max_proj;
215 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts 'op' to 'mode'. */
220 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
224 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
225 res = optimize_node(res);
226 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast node: reinterprets 'op' as type 'to_tp'; the mode of
   the operand is kept unchanged. */
231 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
235 assert(is_atomic_type(to_tp));
237 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
238 res->attr.cast.totype = to_tp;
239 res = optimize_node(res);
240 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple node bundling 'arity' values into one mode_T result. */
245 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
249 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
250 res = optimize_node (res);
251 IRN_VRFY_IRG(res, irg);
/* Arithmetic constructors. Each packs its operands into a local 'in' array
   (the initializing lines are not visible in this excerpt -- confirm), builds
   the node, runs local optimization and verifies it. Quot/DivMod/Div/Mod take
   a memory operand and produce a mode_T tuple (memory, exception, result). */
256 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *op1, ir_node *op2, ir_mode *mode)
264 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
265 res = optimize_node(res);
266 IRN_VRFY_IRG(res, irg);
271 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
279 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
280 res = optimize_node (res);
281 IRN_VRFY_IRG(res, irg);
286 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
287 ir_node *op, ir_mode *mode)
291 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
292 res = optimize_node(res);
293 IRN_VRFY_IRG(res, irg);
298 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
299 ir_node *op1, ir_node *op2, ir_mode *mode)
306 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
314 ir_node *memop, ir_node *op1, ir_node *op2)
322 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
323 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
329 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
330 ir_node *memop, ir_node *op1, ir_node *op2)
338 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
339 res = optimize_node(res);
340 IRN_VRFY_IRG(res, irg);
345 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
346 ir_node *memop, ir_node *op1, ir_node *op2)
354 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
355 res = optimize_node(res);
356 IRN_VRFY_IRG(res, irg);
361 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
362 ir_node *memop, ir_node *op1, ir_node *op2)
370 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
371 res = optimize_node(res);
372 IRN_VRFY_IRG(res, irg);
/* Bitwise, shift, Abs and Cmp constructors; same pattern as the arithmetic
   constructors above (operand array built on lines elided from this excerpt).
   Cmp produces a mode_T tuple of relation Projs. */
377 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
378 ir_node *op1, ir_node *op2, ir_mode *mode)
385 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
386 res = optimize_node(res);
387 IRN_VRFY_IRG(res, irg);
392 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
393 ir_node *op1, ir_node *op2, ir_mode *mode)
400 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
401 res = optimize_node(res);
402 IRN_VRFY_IRG(res, irg);
407 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
408 ir_node *op1, ir_node *op2, ir_mode *mode)
415 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
416 res = optimize_node (res);
417 IRN_VRFY_IRG(res, irg);
422 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
423 ir_node *op, ir_mode *mode)
427 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
434 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
435 ir_node *op, ir_node *k, ir_mode *mode)
442 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
450 ir_node *op, ir_node *k, ir_mode *mode)
457 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
458 res = optimize_node(res);
459 IRN_VRFY_IRG(res, irg);
464 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *op, ir_node *k, ir_mode *mode)
472 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
480 ir_node *op, ir_node *k, ir_mode *mode)
487 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
495 ir_node *op, ir_mode *mode)
499 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
500 res = optimize_node (res);
501 IRN_VRFY_IRG(res, irg);
506 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
507 ir_node *op1, ir_node *op2)
514 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
515 res = optimize_node(res);
516 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X control flow, no operands). */
521 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
525 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
526 res = optimize_node (res);
527 IRN_VRFY_IRG (res, irg);
/* Constructs a Cond on selector 'c'; starts out 'dense' (contrast with
   new_rd_defaultProj, which switches the Cond to 'fragmentary'). */
532 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
536 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
537 res->attr.c.kind = dense;
538 res->attr.c.default_proj = 0;
539 res = optimize_node (res);
540 IRN_VRFY_IRG(res, irg);
/* Constructs a Call. The real predecessor array r_in prepends two slots
   before the arguments (presumably store and callee, assigned on lines not
   visible here -- confirm); tp is the callee's method type. */
545 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
546 ir_node *callee, int arity, ir_node **in, type *tp)
553 NEW_ARR_A(ir_node *, r_in, r_arity);
556 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
558 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
560 assert((get_unknown_type() == tp) || is_Method_type(tp));
561 set_Call_type(res, tp);
562 res->attr.call.exc.pin_state = op_pin_state_pinned;
563 res->attr.call.callee_arr = NULL;
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
/* Constructs a Return: r_in holds the store in slot 0 followed by the
   'arity' result values. */
570 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
571 ir_node *store, int arity, ir_node **in)
578 NEW_ARR_A (ir_node *, r_in, r_arity);
580 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
581 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
582 res = optimize_node(res);
583 IRN_VRFY_IRG(res, irg);
/* Constructs a Raise (throws exception object 'obj'). */
588 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
595 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
/* Constructs a Load of 'mode' from address 'adr'; result is a mode_T tuple
   (memory, exception, loaded value). Defaults: pinned, non-volatile. */
602 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
603 ir_node *store, ir_node *adr, ir_mode *mode)
610 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
611 res->attr.load.exc.pin_state = op_pin_state_pinned;
612 res->attr.load.load_mode = mode;
613 res->attr.load.volatility = volatility_non_volatile;
614 res = optimize_node(res);
615 IRN_VRFY_IRG(res, irg);
/* Constructs a Store of 'val' to address 'adr'. Defaults: pinned,
   non-volatile. */
620 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
621 ir_node *store, ir_node *adr, ir_node *val)
629 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
630 res->attr.store.exc.pin_state = op_pin_state_pinned;
631 res->attr.store.volatility = volatility_non_volatile;
632 res = optimize_node(res);
633 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of 'size' bytes/elements of 'alloc_type'; 'where'
   selects stack vs. heap allocation. */
638 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
639 ir_node *size, type *alloc_type, where_alloc where)
646 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
647 res->attr.a.exc.pin_state = op_pin_state_pinned;
648 res->attr.a.where = where;
649 res->attr.a.type = alloc_type;
650 res = optimize_node(res);
651 IRN_VRFY_IRG(res, irg);
/* Constructs a Free releasing 'ptr' (allocated as 'free_type'). */
656 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
657 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
665 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
666 res->attr.f.where = where;
667 res->attr.f.type = free_type;
668 res = optimize_node(res);
669 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel selecting entity 'ent' from 'objptr'; the r_in array
   prepends store and objptr before the 'arity' index operands (assignments
   for slots 0/1 are on lines not visible in this excerpt -- confirm). */
674 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
675 int arity, ir_node **in, entity *ent)
681 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
684 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
687 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
688 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
689 res->attr.s.ent = ent;
690 res = optimize_node(res);
691 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf (runtime type test). NOTE(review): the node is built
   with op_Sel, not a dedicated op_InstOf -- this looks like the historical
   implementation choice of this codebase, but verify. Not optimized. */
696 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
697 ir_node *objptr, type *ent)
704 NEW_ARR_A(ir_node *, r_in, r_arity);
708 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
709 res->attr.io.ent = ent;
711 /* res = optimize(res); */
712 IRN_VRFY_IRG(res, irg);
/* Constructs a typed SymConst; address kinds (addr_name/addr_ent) get a
   pointer mode (the mode selection lines are elided here -- confirm). */
717 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
718 symconst_kind symkind, type *tp) {
722 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
727 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
729 res->attr.i.num = symkind;
730 res->attr.i.sym = value;
733 res = optimize_node(res);
734 IRN_VRFY_IRG(res, irg);
/* SymConst with the default (unknown) type. */
739 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
740 symconst_kind symkind)
742 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Convenience wrappers; all four place the SymConst in the start block. */
746 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
747 symconst_symbol sym = {(type *)symbol};
748 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
751 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
752 symconst_symbol sym = {(type *)symbol};
753 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
756 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
757 symconst_symbol sym = {symbol};
758 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
761 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
762 symconst_symbol sym = {symbol};
763 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Constructs a Sync merging 'arity' memory values into one mode_M value. */
767 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
771 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
772 res = optimize_node(res);
773 IRN_VRFY_IRG(res, irg);
/* Returns the graph's Bad node (body elided in this excerpt). */
778 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts that 'val' relates to 'bound' via 'cmp'. */
784 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
786 ir_node *in[2], *res;
790 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
791 res->attr.confirm_cmp = cmp;
792 res = optimize_node (res);
793 IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown of mode 'm', placed in the start block. */
798 new_rd_Unknown (ir_graph *irg, ir_mode *m)
800 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Interprocedural view: CallBegin marks the control transfer into a callee. */
804 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
809 in[0] = get_Call_ptr(call);
810 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
811 /* res->attr.callbegin.irg = irg; */
812 res->attr.callbegin.call = call;
813 res = optimize_node(res);
814 IRN_VRFY_IRG(res, irg);
/* Interprocedural view: EndReg / EndExcept end the regular resp. exceptional
   control flow of a procedure (arity -1: dynamic predecessor array). */
819 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
823 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
825 IRN_VRFY_IRG(res, irg);
830 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
834 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
835 irg->end_except = res;
836 IRN_VRFY_IRG (res, irg);
/* Constructs a Break (interprocedural jump out of a called graph). */
841 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
845 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
846 res = optimize_node(res);
847 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter (interprocedural analog of Proj, with cg edges). */
852 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
857 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
858 res->attr.filter.proj = proj;
859 res->attr.filter.in_cg = NULL;
860 res->attr.filter.backedge = NULL;
863 assert(get_Proj_pred(res));
864 assert(get_nodes_block(get_Proj_pred(res)));
866 res = optimize_node(res);
867 IRN_VRFY_IRG(res, irg);
/* Returns the graph's NoMem node (body elided in this excerpt). */
872 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux: sel ? ir_true : ir_false. */
877 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
878 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
887 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
890 res = optimize_node(res);
891 IRN_VRFY_IRG(res, irg);
/* Convenience wrappers: each new_r_* constructor simply calls the matching
   new_rd_* constructor with a NULL dbg_info. */
896 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
897 return new_rd_Block(NULL, irg, arity, in);
899 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
900 return new_rd_Start(NULL, irg, block);
902 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
903 return new_rd_End(NULL, irg, block);
905 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
906 return new_rd_Jmp(NULL, irg, block);
908 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
909 return new_rd_Cond(NULL, irg, block, c);
911 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
912 ir_node *store, int arity, ir_node **in) {
913 return new_rd_Return(NULL, irg, block, store, arity, in);
915 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
916 ir_node *store, ir_node *obj) {
917 return new_rd_Raise(NULL, irg, block, store, obj);
919 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
920 ir_mode *mode, tarval *con) {
921 return new_rd_Const(NULL, irg, block, mode, con);
924 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
925 ir_mode *mode, long value) {
926 return new_rd_Const_long(NULL, irg, block, mode, value);
930 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
931 symconst_symbol value, symconst_kind symkind) {
932 return new_rd_SymConst(NULL, irg, block, value, symkind);
934 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
935 ir_node *objptr, int n_index, ir_node **index,
937 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
939 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
941 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
943 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
944 ir_node *callee, int arity, ir_node **in,
946 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
948 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
949 ir_node *op1, ir_node *op2, ir_mode *mode) {
950 return new_rd_Add(NULL, irg, block, op1, op2, mode);
952 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
953 ir_node *op1, ir_node *op2, ir_mode *mode) {
954 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
956 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_mode *mode) {
958 return new_rd_Minus(NULL, irg, block, op, mode);
960 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
961 ir_node *op1, ir_node *op2, ir_mode *mode) {
962 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
964 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
965 ir_node *memop, ir_node *op1, ir_node *op2) {
966 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
968 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
969 ir_node *memop, ir_node *op1, ir_node *op2) {
970 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
972 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
973 ir_node *memop, ir_node *op1, ir_node *op2) {
974 return new_rd_Div(NULL, irg, block, memop, op1, op2);
976 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
977 ir_node *memop, ir_node *op1, ir_node *op2) {
978 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
980 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
981 ir_node *op, ir_mode *mode) {
982 return new_rd_Abs(NULL, irg, block, op, mode);
984 ir_node *new_r_And (ir_graph *irg, ir_node *block,
985 ir_node *op1, ir_node *op2, ir_mode *mode) {
986 return new_rd_And(NULL, irg, block, op1, op2, mode);
988 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
989 ir_node *op1, ir_node *op2, ir_mode *mode) {
990 return new_rd_Or(NULL, irg, block, op1, op2, mode);
992 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
993 ir_node *op1, ir_node *op2, ir_mode *mode) {
994 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
996 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
997 ir_node *op, ir_mode *mode) {
998 return new_rd_Not(NULL, irg, block, op, mode);
1000 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1001 ir_node *op1, ir_node *op2) {
1002 return new_rd_Cmp(NULL, irg, block, op1, op2);
1004 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1005 ir_node *op, ir_node *k, ir_mode *mode) {
1006 return new_rd_Shl(NULL, irg, block, op, k, mode);
1008 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1009 ir_node *op, ir_node *k, ir_mode *mode) {
1010 return new_rd_Shr(NULL, irg, block, op, k, mode);
1012 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1013 ir_node *op, ir_node *k, ir_mode *mode) {
1014 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1016 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1017 ir_node *op, ir_node *k, ir_mode *mode) {
1018 return new_rd_Rot(NULL, irg, block, op, k, mode);
1020 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1021 ir_node *op, ir_mode *mode) {
1022 return new_rd_Conv(NULL, irg, block, op, mode);
1024 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1025 return new_rd_Cast(NULL, irg, block, op, to_tp);
1027 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1028 ir_node **in, ir_mode *mode) {
1029 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1031 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1032 ir_node *store, ir_node *adr, ir_mode *mode) {
1033 return new_rd_Load(NULL, irg, block, store, adr, mode);
1035 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1036 ir_node *store, ir_node *adr, ir_node *val) {
1037 return new_rd_Store(NULL, irg, block, store, adr, val);
1039 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1040 ir_node *size, type *alloc_type, where_alloc where) {
1041 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1043 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1044 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1045 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1047 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1048 return new_rd_Sync(NULL, irg, block, arity, in);
1050 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1051 ir_mode *mode, long proj) {
1052 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1054 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1056 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1058 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1059 int arity, ir_node **in) {
1060 return new_rd_Tuple(NULL, irg, block, arity, in );
1062 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1063 ir_node *val, ir_mode *mode) {
1064 return new_rd_Id(NULL, irg, block, val, mode);
1066 ir_node *new_r_Bad (ir_graph *irg) {
1067 return new_rd_Bad(irg);
1069 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1070 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1072 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1073 return new_rd_Unknown(irg, m);
1075 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1076 return new_rd_CallBegin(NULL, irg, block, callee);
1078 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1079 return new_rd_EndReg(NULL, irg, block);
1081 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1082 return new_rd_EndExcept(NULL, irg, block);
1084 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1085 return new_rd_Break(NULL, irg, block);
1087 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1088 ir_mode *mode, long proj) {
1089 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1091 ir_node *new_r_NoMem (ir_graph *irg) {
1092 return new_rd_NoMem(irg);
1094 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1095 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1096 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1100 /** ********************/
1101 /** public interfaces */
1102 /** construction tools */
1106 * - create a new Start node in the current block
1108 * @return s - pointer to the created Start node
/* Operates on current_ir_graph / its current_block (the stateful interface). */
1113 new_d_Start (dbg_info* db)
1117 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1118 op_Start, mode_T, 0, NULL);
1119 /* res->attr.start.irg = current_ir_graph; */
1121 res = optimize_node(res);
1122 IRN_VRFY_IRG(res, current_ir_graph);
/* End node in the current graph; arity -1 for a dynamic keep-alive array. */
1127 new_d_End (dbg_info* db)
1130 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1131 op_End, mode_X, -1, NULL);
1132 res = optimize_node(res);
1133 IRN_VRFY_IRG(res, current_ir_graph);
1138 /* Constructs a Block with a fixed number of predecessors.
1139 Does set current_block. Can be used with automatic Phi
1140 node construction. */
1142 new_d_Block (dbg_info* db, int arity, ir_node **in)
1146 bool has_unknown = false;
1148 res = new_rd_Block(db, current_ir_graph, arity, in);
1150 /* Create and initialize array for Phi-node construction. */
1151 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr holds the current value of each of the n_loc local variables
   in this block; zeroed so that missing values trigger Phi construction. */
1152 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1153 current_ir_graph->n_loc);
1154 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan control predecessors for Unknown nodes; the loop body (elided in
   this excerpt) presumably sets has_unknown -- confirm. */
1157 for (i = arity-1; i >= 0; i--)
1158 if (get_irn_op(in[i]) == op_Unknown) {
1163 if (!has_unknown) res = optimize_node(res);
/* This is what distinguishes new_d_Block from new_rd_Block. */
1164 current_ir_graph->current_block = res;
1166 IRN_VRFY_IRG(res, current_ir_graph);
1171 /* ***********************************************************************/
1172 /* Methods necessary for automatic Phi node creation */
1174 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1175 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1176 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1177 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1179 Call Graph: ( A ---> B == A "calls" B)
1181 get_value mature_immBlock
1189 get_r_value_internal |
1193 new_rd_Phi0 new_rd_Phi_in
1195 * *************************************************************************** */
1197 /** Creates a Phi node with 0 predecessors */
/* Placeholder Phi used while a block is still immature; its predecessors
   are filled in when the block is matured. Not optimized. */
1198 static INLINE ir_node *
1199 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1203 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1204 IRN_VRFY_IRG(res, irg);
1208 /* There are two implementations of the Phi node construction. The first
1209 is faster, but does not work for blocks with more than 2 predecessors.
1210 The second works always but is slower and causes more unnecessary Phi
1212 Select the implementations by the following preprocessor flag set in
1214 #if USE_FAST_PHI_CONSTRUCTION
1216 /* This is a stack used for allocating and deallocating nodes in
1217 new_rd_Phi_in. The original implementation used the obstack
1218 to model this stack, now it is explicit. This reduces side effects.
1220 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack (flexible array of ir_node*). */
1222 new_Phi_in_stack(void) {
1225 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1227 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's array (and, presumably on an elided line, the
   struct itself -- confirm). */
1234 free_Phi_in_stack(Phi_in_stack *s) {
1235 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the current graph's reuse stack:
   append if the slot past 'pos' does not exist yet, else overwrite it. */
1239 free_to_Phi_in_stack(ir_node *phi) {
1240 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1241 current_ir_graph->Phi_in_stack->pos)
1242 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1244 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1246 (current_ir_graph->Phi_in_stack->pos)++;
/* Either allocates a new Phi node or pops and re-initializes one from the
   reuse stack. NOTE(review): 'db' below is not among the visible parameters;
   its declaration must be on an elided line -- confirm. */
1249 static INLINE ir_node *
1250 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1251 int arity, ir_node **in) {
1253 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1254 int pos = current_ir_graph->Phi_in_stack->pos;
1258 /* We need to allocate a new node */
1259 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1260 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1262 /* reuse the old node and initialize it again. */
1265 assert (res->kind == k_ir_node);
1266 assert (res->op == op_Phi);
1270 assert (arity >= 0);
1271 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer; operands start at in[1]. */
1272 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1274 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1276 (current_ir_graph->Phi_in_stack->pos)--;
1280 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1282 /* Creates a Phi node with a given, fixed array **in of predecessors.
1283 If the Phi node is unnecessary, as the same value reaches the block
1284 through all control flow paths, it is eliminated and the value
1285 returned directly. This constructor is only intended for use in
1286 the automatic Phi node generation triggered by get_value or mature.
1287 The implementation is quite tricky and depends on the fact, that
1288 the nodes are allocated on a stack:
1289 The in array contains predecessors and NULLs. The NULLs appear,
1290 if get_r_value_internal, that computed the predecessors, reached
1291 the same block on two paths. In this case the same value reaches
1292 this block on both paths, there is no definition in between. We need
1293 not allocate a Phi where these path's merge, but we have to communicate
1294 this fact to the caller. This happens by returning a pointer to the
1295 node the caller _will_ allocate. (Yes, we predict the address. We can
1296 do so because the nodes are allocated on the obstack.) The caller then
1297 finds a pointer to itself and, when this routine is called again,
1300 static INLINE ir_node *
1301 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1304 ir_node *res, *known;
1306 /* Allocate a new node on the obstack. This can return a node to
1307 which some of the pointers in the in-array already point.
1308 Attention: the constructor copies the in array, i.e., the later
1309 changes to the array in this routine do not affect the
1310 constructed node! If the in array contains NULLs, there will be
1311 missing predecessors in the returned node. Is this a possible
1312 internal state of the Phi node generation? */
1313 #if USE_EXPLICIT_PHI_IN_STACK
1314 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1316 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1317 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1320 /* The in-array can contain NULLs. These were returned by
1321 get_r_value_internal if it reached the same block/definition on a
1322 second path. The NULLs are replaced by the node itself to
1323 simplify the test in the next loop. */
1324 for (i = 0; i < ins; ++i) {
1329 /* This loop checks whether the Phi has more than one predecessor.
1330 If so, it is a real Phi node and we break the loop. Else the Phi
1331 node merges the same definition on several paths and therefore is
1333 for (i = 0; i < ins; ++i)
1335 if (in[i] == res || in[i] == known) continue;
1343 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Unneeded Phi: recycle it via the explicit stack, or undo the obstack
   allocation directly (edge bookkeeping first, then obstack_free). */
1345 #if USE_EXPLICIT_PHI_IN_STACK
1346 free_to_Phi_in_stack(res);
1348 edges_node_deleted(res, current_ir_graph);
1349 obstack_free (current_ir_graph->obst, res);
1353 res = optimize_node (res);
1354 IRN_VRFY_IRG(res, irg);
1357 /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: phi_merge and get_r_value_internal are mutually
   recursive. */
1362 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1365 allocates and returns this node. The routine called to allocate the
1366 node might optimize it away and return a real value, or even a pointer
1367 to a deallocated Phi node on top of the obstack!
1368 This function is called with an in-array of proper size. **/
/* phi_merge -- collect the operand of variable slot 'pos' from every
   predecessor block (via get_r_value_internal) into 'nin', then build the
   Phi with new_rd_Phi_in and record it in graph_arr[pos] unless a value is
   already present there. */
1370 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1372 ir_node *prevBlock, *res;
1375 /* This loop goes to all predecessor blocks of the block the Phi node is in
1376 and there finds the operands of the Phi node by calling
1377 get_r_value_internal. */
1378 for (i = 1; i <= ins; ++i) {
1379 assert (block->in[i]);
1380 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1382 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1385 /* After collecting all predecessors into the array nin a new Phi node
1386 with these predecessors is created. This constructor contains an
1387 optimization: If all predecessors of the Phi node are identical it
1388 returns the only operand instead of a new Phi node. If the value
1389 passes two different control flow edges without being defined, and
1390 this is the second path treated, a pointer to the node that will be
1391 allocated for the first path (recursion) is returned. We already
1392 know the address of this node, as it is the next node to be allocated
1393 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1394 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1396 /* Now we know the value for "pos" and can enter it in the array with
1397 all known local variables. Attention: this might be a pointer to
1398 a node, that later will be allocated!!! See new_rd_Phi_in.
1399 If this is called in mature, after some set_value in the same block,
1400 the proper value must not be overwritten:
1402 get_value (makes Phi0, puts it into graph_arr)
1403 set_value (overwrites Phi0 in graph_arr)
1404 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1407 if (!block->attr.block.graph_arr[pos]) {
1408 block->attr.block.graph_arr[pos] = res;
1410 /* printf(" value already computed by %s\n",
1411 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1417 /* This function returns the last definition of a variable. In case
1418 this variable was last defined in a previous block, Phi nodes are
1419 inserted. If the part of the firm graph containing the definition
1420 is not yet constructed, a dummy Phi node is returned. */
/* block: block to search in; pos: variable slot in graph_arr;
   mode: mode of the requested value. */
1422 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1425 /* There are 4 cases to treat.
1427 1. The block is not mature and we visit it the first time. We can not
1428 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1429 predecessors is returned. This node is added to the linked list (field
1430 "link") of the containing block to be completed when this block is
1431 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1434 2. The value is already known in this block, graph_arr[pos] is set and we
1435 visit the block the first time. We can return the value without
1436 creating any new nodes.
1438 3. The block is mature and we visit it the first time. A Phi node needs
1439 to be created (phi_merge). If the Phi is not needed, as all it's
1440 operands are the same value reaching the block through different
1441 paths, it's optimized away and the value itself is returned.
1443 4. The block is mature, and we visit it the second time. Now two
1444 subcases are possible:
1445 * The value was computed completely the last time we were here. This
1446 is the case if there is no loop. We can return the proper value.
1447 * The recursion that visited this node and set the flag did not
1448 return yet. We are computing a value in a loop and need to
1449 break the recursion without knowing the result yet.
1450 @@@ strange case. Straight forward we would create a Phi before
1451 starting the computation of it's predecessors. In this case we will
1452 find a Phi here in any case. The problem is that this implementation
1453 only creates a Phi after computing the predecessors, so that it is
1454 hard to compute self references of this Phi. @@@
1455 There is no simple check for the second subcase. Therefore we check
1456 for a second visit and treat all such cases as the second subcase.
1457 Anyways, the basic situation is the same: we reached a block
1458 on two paths without finding a definition of the value: No Phi
1459 nodes are needed on both paths.
1460 We return this information "Two paths, no Phi needed" by a very tricky
1461 implementation that relies on the fact that an obstack is a stack and
1462 will return a node with the same address on different allocations.
1463 Look also at phi_merge and new_rd_phi_in to understand this.
1464 @@@ Unfortunately this does not work, see testprogram
1465 three_cfpred_example.
1469 /* case 4 -- already visited. */
1470 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1472 /* visited the first time */
1473 set_irn_visited(block, get_irg_visited(current_ir_graph));
1475 /* Get the local valid value */
1476 res = block->attr.block.graph_arr[pos];
1478 /* case 2 -- If the value is actually computed, return it. */
1479 if (res) return res;
1481 if (block->attr.block.matured) { /* case 3 */
1483 /* The Phi has the same amount of ins as the corresponding block. */
1484 int ins = get_irn_arity(block);
1486 NEW_ARR_A (ir_node *, nin, ins);
1488 /* Phi merge collects the predecessors and then creates a node. */
1489 res = phi_merge (block, pos, mode, nin, ins);
1491 } else { /* case 1 */
1492 /* The block is not mature, we don't know how many in's are needed. A Phi
1493 with zero predecessors is created. Such a Phi node is called Phi0
1494 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1495 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1497 The Phi0 has to remember the pos of it's internal value. If the real
1498 Phi is computed, pos is used to update the array with the local
1501 res = new_rd_Phi0 (current_ir_graph, block, mode);
1502 res->attr.phi0_pos = pos;
1503 res->link = block->link;
1507 /* If we get here, the frontend missed a use-before-definition error */
1510 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1511 assert (mode->code >= irm_F && mode->code <= irm_P);
1512 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1513 tarval_mode_null[mode->code]);
1516 /* The local valid value is available now. */
1517 block->attr.block.graph_arr[pos] = res;
1525 it starts the recursion. This causes an Id at the entry of
1526 every block that has no definition of the value! **/
/* Dummy Phi-in stack interface for this build configuration: no stack is
   kept, so creation yields NULL and freeing is a no-op. */
1528 #if USE_EXPLICIT_PHI_IN_STACK
1530 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1531 void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in -- second (fast-construction) variant.  Additionally takes
   'phi0', the Phi0 placeholder this Phi will eventually replace, so that
   self references through the placeholder can be folded immediately.
   If the Phi degenerates to a single value, the fresh node is freed again
   and that value (possibly after optimizing a predecessor Phi) is
   returned. */
1534 static INLINE ir_node *
1535 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1536 ir_node **in, int ins, ir_node *phi0)
1539 ir_node *res, *known;
1541 /* Allocate a new node on the obstack. The allocation copies the in
1543 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1544 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1546 /* This loop checks whether the Phi has more than one predecessor.
1547 If so, it is a real Phi node and we break the loop. Else the
1548 Phi node merges the same definition on several paths and therefore
1549 is not needed. Don't consider Bad nodes! */
1551 for (i=0; i < ins; ++i)
1555 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1557 /* Optimize self referencing Phis: We can't detect them yet properly, as
1558 they still refer to the Phi0 they will replace. So replace right now. */
1559 if (phi0 && in[i] == phi0) in[i] = res;
1561 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1569 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1572 edges_node_deleted(res, current_ir_graph);
1573 obstack_free (current_ir_graph->obst, res);
1574 if (is_Phi(known)) {
1575 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1576 order, an enclosing Phi node may get superfluous. */
1577 res = optimize_in_place_2(known);
1578 if (res != known) { exchange(known, res); }
1583 /* An undefined value, e.g., in unreachable code. */
1587 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1588 IRN_VRFY_IRG(res, irg);
1589 /* Memory Phis in endless loops must be kept alive.
1590 As we can't distinguish these easily we keep all of them alive. */
1591 if ((res->op == op_Phi) && (mode == mode_M))
1592 add_End_keepalive(irg->end, res);
/* Forward declarations for the mutually recursive fast-construction
   routines. */
1599 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1601 #if PRECISE_EXC_CONTEXT
1603 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1605 /* Construct a new frag_array for node n.
1606 Copy the content from the current graph_arr of the corresponding block:
1607 this is the current state.
1608 Set ProjM(n) as current memory state.
1609 Further the last entry in frag_arr of current block points to n. This
1610 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1612 static INLINE ir_node ** new_frag_arr (ir_node *n)
1617 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1618 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1619 sizeof(ir_node *)*current_ir_graph->n_loc);
1621 /* turn off optimization before allocating Proj nodes, as res isn't
1623 opt = get_opt_optimize(); set_optimize(0);
1624 /* Here we rely on the fact that all frag ops have Memory as first result! */
1625 if (get_irn_op(n) == op_Call)
1626 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* All other fragile ops share the same memory-Proj number; the assert
   documents and checks that invariant so pn_Alloc_M can stand for all. */
1628 assert((pn_Quot_M == pn_DivMod_M) &&
1629 (pn_Quot_M == pn_Div_M) &&
1630 (pn_Quot_M == pn_Mod_M) &&
1631 (pn_Quot_M == pn_Load_M) &&
1632 (pn_Quot_M == pn_Store_M) &&
1633 (pn_Quot_M == pn_Alloc_M) );
1634 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1638 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* get_frag_arr -- fetch the frag_arr stored in the exception attributes of
   a fragile op; the switch selects the per-opcode attribute union member
   that holds it. */
1643 * returns the frag_arr from a node
1645 static INLINE ir_node **
1646 get_frag_arr (ir_node *n) {
1647 switch (get_irn_opcode(n)) {
1649 return n->attr.call.exc.frag_arr;
1651 return n->attr.a.exc.frag_arr;
1653 return n->attr.load.exc.frag_arr;
1655 return n->attr.store.exc.frag_arr;
1657 return n->attr.except.frag_arr;
/* set_frag_value -- enter 'val' at slot 'pos' of 'frag_arr' (without
   overwriting an existing entry) and propagate it along the chain of frag
   arrays: frag_arr[n_loc-1] links to the next fragile op in the block.
   Two implementations follow: a recursive one guarded by an assert against
   cycles, and a bounded-iteration one (limit 1000) that asserts on a
   potential endless chain; the preprocessor selector is not visible in
   this view. */
1662 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1664 if (!frag_arr[pos]) frag_arr[pos] = val;
1665 if (frag_arr[current_ir_graph->n_loc - 1]) {
1666 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1667 assert(arr != frag_arr && "Endless recursion detected");
1668 set_frag_value(arr, pos, val);
1673 for (i = 0; i < 1000; ++i) {
1674 if (!frag_arr[pos]) {
1675 frag_arr[pos] = val;
1677 if (frag_arr[current_ir_graph->n_loc - 1]) {
1678 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1684 assert(0 && "potential endless recursion");
/* get_r_frag_value_internal -- look up the value for slot 'pos' as seen
   right after fragile op 'cfOp', consulting cfOp's frag array first and
   falling back to the normal block lookup.  A set_value between cfOp and
   the block end forces building a Phi (or Phi0 for immature blocks). */
1689 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1693 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1695 frag_arr = get_frag_arr(cfOp);
1696 res = frag_arr[pos];
1698 if (block->attr.block.graph_arr[pos]) {
1699 /* There was a set_value after the cfOp and no get_value before that
1700 set_value. We must build a Phi node now. */
1701 if (block->attr.block.matured) {
1702 int ins = get_irn_arity(block);
1704 NEW_ARR_A (ir_node *, nin, ins);
1705 res = phi_merge(block, pos, mode, nin, ins);
1707 res = new_rd_Phi0 (current_ir_graph, block, mode);
1708 res->attr.phi0_pos = pos;
1709 res->link = block->link;
1713 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1714 but this should be better: (remove comment if this works) */
1715 /* It's a Phi, we can write this into all graph_arrs with NULL */
1716 set_frag_value(block->attr.block.graph_arr, pos, res);
1718 res = get_r_value_internal(block, pos, mode);
1719 set_frag_value(block->attr.block.graph_arr, pos, res);
/* phi_merge -- fast-construction variant.  Installs a Phi0 placeholder in
   graph_arr[pos] first (to break recursion), collects the operands from all
   predecessor blocks, then builds the real Phi and exchanges the
   placeholder for it. */
1727 computes the predecessors for the real phi node, and then
1728 allocates and returns this node. The routine called to allocate the
1729 node might optimize it away and return a real value.
1730 This function must be called with an in-array of proper size. **/
1732 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1734 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1737 /* If this block has no value at pos create a Phi0 and remember it
1738 in graph_arr to break recursions.
1739 Else we may not set graph_arr as there a later value is remembered. */
1741 if (!block->attr.block.graph_arr[pos]) {
1742 if (block == get_irg_start_block(current_ir_graph)) {
1743 /* Collapsing to Bad tarvals is no good idea.
1744 So we call a user-supplied routine here that deals with this case as
1745 appropriate for the given language. Sadly the only help we can give
1746 here is the position.
1748 Even if all variables are defined before use, it can happen that
1749 we get to the start block, if a cond has been replaced by a tuple
1750 (bad, jmp). In this case we call the function needlessly, eventually
1751 generating a non-existent error.
1752 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1755 if (default_initialize_local_variable)
1756 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1758 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1759 /* We don't need to care about exception ops in the start block.
1760 There are none by definition. */
1761 return block->attr.block.graph_arr[pos];
1763 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1764 block->attr.block.graph_arr[pos] = phi0;
1765 #if PRECISE_EXC_CONTEXT
1766 if (get_opt_precise_exc_context()) {
1767 /* Set graph_arr for fragile ops. Also here we should break recursion.
1768 We could choose a cyclic path through an cfop. But the recursion would
1769 break at some point. */
1770 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1776 /* This loop goes to all predecessor blocks of the block the Phi node
1777 is in and there finds the operands of the Phi node by calling
1778 get_r_value_internal. */
1779 for (i = 1; i <= ins; ++i) {
1780 prevCfOp = skip_Proj(block->in[i]);
1782 if (is_Bad(prevCfOp)) {
1783 /* In case a Cond has been optimized we would get right to the start block
1784 with an invalid definition. */
1785 nin[i-1] = new_Bad();
1788 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1790 if (!is_Bad(prevBlock)) {
1791 #if PRECISE_EXC_CONTEXT
1792 if (get_opt_precise_exc_context() &&
1793 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): get_r_frag_value_internal is called twice here (once in the
   assert, once for the value).  In NDEBUG builds only the second call
   remains; verify the repeated call is side-effect free / cached. */
1794 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1795 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1798 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1800 nin[i-1] = new_Bad();
1804 /* We want to pass the Phi0 node to the constructor: this finds additional
1805 optimization possibilities.
1806 The Phi0 node either is allocated in this function, or it comes from
1807 a former call to get_r_value_internal. In this case we may not yet
1808 exchange phi0, as this is done in mature_immBlock. */
1810 phi0_all = block->attr.block.graph_arr[pos];
1811 if (!((get_irn_op(phi0_all) == op_Phi) &&
1812 (get_irn_arity(phi0_all) == 0) &&
1813 (get_nodes_block(phi0_all) == block)))
1819 /* After collecting all predecessors into the array nin a new Phi node
1820 with these predecessors is created. This constructor contains an
1821 optimization: If all predecessors of the Phi node are identical it
1822 returns the only operand instead of a new Phi node. */
1823 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1825 /* In case we allocated a Phi0 node at the beginning of this procedure,
1826 we need to exchange this Phi0 with the real Phi. */
1828 exchange(phi0, res);
1829 block->attr.block.graph_arr[pos] = res;
1830 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1831 only an optimization. */
1837 /* This function returns the last definition of a variable. In case
1838 this variable was last defined in a previous block, Phi nodes are
1839 inserted. If the part of the firm graph containing the definition
1840 is not yet constructed, a dummy Phi node is returned. */
/* Fast-construction variant: unlike the first implementation, a revisited
   block always has a value (the Phi0 placed by phi_merge), so case 4 never
   returns NULL here. */
1842 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1845 /* There are 4 cases to treat.
1847 1. The block is not mature and we visit it the first time. We can not
1848 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1849 predecessors is returned. This node is added to the linked list (field
1850 "link") of the containing block to be completed when this block is
1851 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1854 2. The value is already known in this block, graph_arr[pos] is set and we
1855 visit the block the first time. We can return the value without
1856 creating any new nodes.
1858 3. The block is mature and we visit it the first time. A Phi node needs
1859 to be created (phi_merge). If the Phi is not needed, as all it's
1860 operands are the same value reaching the block through different
1861 paths, it's optimized away and the value itself is returned.
1863 4. The block is mature, and we visit it the second time. Now two
1864 subcases are possible:
1865 * The value was computed completely the last time we were here. This
1866 is the case if there is no loop. We can return the proper value.
1867 * The recursion that visited this node and set the flag did not
1868 return yet. We are computing a value in a loop and need to
1869 break the recursion. This case only happens if we visited
1870 the same block with phi_merge before, which inserted a Phi0.
1871 So we return the Phi0.
1874 /* case 4 -- already visited. */
1875 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1876 /* As phi_merge allocates a Phi0 this value is always defined. Here
1877 is the critical difference of the two algorithms. */
1878 assert(block->attr.block.graph_arr[pos]);
1879 return block->attr.block.graph_arr[pos];
1882 /* visited the first time */
1883 set_irn_visited(block, get_irg_visited(current_ir_graph));
1885 /* Get the local valid value */
1886 res = block->attr.block.graph_arr[pos];
1888 /* case 2 -- If the value is actually computed, return it. */
1889 if (res) { return res; };
1891 if (block->attr.block.matured) { /* case 3 */
1893 /* The Phi has the same amount of ins as the corresponding block. */
1894 int ins = get_irn_arity(block);
1896 NEW_ARR_A (ir_node *, nin, ins);
1898 /* Phi merge collects the predecessors and then creates a node. */
1899 res = phi_merge (block, pos, mode, nin, ins);
1901 } else { /* case 1 */
1902 /* The block is not mature, we don't know how many in's are needed. A Phi
1903 with zero predecessors is created. Such a Phi node is called Phi0
1904 node. The Phi0 is then added to the list of Phi0 nodes in this block
1905 to be matured by mature_immBlock later.
1906 The Phi0 has to remember the pos of it's internal value. If the real
1907 Phi is computed, pos is used to update the array with the local
1909 res = new_rd_Phi0 (current_ir_graph, block, mode);
1910 res->attr.phi0_pos = pos;
1911 res->link = block->link;
1915 /* If we get here, the frontend missed a use-before-definition error */
1918 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1919 assert (mode->code >= irm_F && mode->code <= irm_P);
1920 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1921 get_mode_null(mode));
1924 /* The local valid value is available now. */
1925 block->attr.block.graph_arr[pos] = res;
1930 #endif /* USE_FAST_PHI_CONSTRUCTION */
1932 /* ************************************************************************** */
1934 /** Finalize a Block node, when all control flows are known. */
1935 /** Acceptable parameters are only Block nodes. */
/* Fixes the in-array length, matures every Phi0 queued on block->link via
   phi_merge/exchange, marks the block matured, and finally optimizes the
   block in place. */
1937 mature_immBlock (ir_node *block)
1944 assert (get_irn_opcode(block) == iro_Block);
1945 /* @@@ should be commented in
1946 assert (!get_Block_matured(block) && "Block already matured"); */
1948 if (!get_Block_matured(block)) {
/* in[0] is not a predecessor, hence the -1. */
1949 ins = ARR_LEN (block->in)-1;
1950 /* Fix block parameters */
1951 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1953 /* An array for building the Phi nodes. */
1954 NEW_ARR_A (ir_node *, nin, ins);
1956 /* Traverse a chain of Phi nodes attached to this block and mature
1958 for (n = block->link; n; n=next) {
1959 inc_irg_visited(current_ir_graph);
1961 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1964 block->attr.block.matured = 1;
1966 /* Now, as the block is a finished firm node, we can optimize it.
1967 Since other nodes have been allocated since the block was created
1968 we can not free the node on the obstack. Therefore we have to call
1970 Unfortunately the optimization does not change a lot, as all allocated
1971 nodes refer to the unoptimized node.
1972 We can call _2, as global cse has no effect on blocks. */
1973 block = optimize_in_place_2(block);
1974 IRN_VRFY_IRG(block, current_ir_graph);
/* ------------------------------------------------------------------ */
/* new_d_* constructors: thin wrappers around the new_rd_* raw
   constructors that supply current_ir_graph and the current (or start)
   block implicitly.                                                   */
/* ------------------------------------------------------------------ */
1979 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1981 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
/* Constants are placed in the start block, not the current block. */
1986 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1988 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1993 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
1995 return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
1999 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2001 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2007 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2009 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2014 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2016 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* Note: also mutates the Cond argument (marks it fragmentary and records
   the default projection number) before building the Proj. */
2021 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2024 assert(arg->op == op_Cond);
2025 arg->attr.c.kind = fragmentary;
2026 arg->attr.c.default_proj = max_proj;
2027 res = new_Proj (arg, mode_X, max_proj);
2032 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2034 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2039 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2041 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2045 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2047 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2052 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2054 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2059 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2061 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2067 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2069 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2074 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2076 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
/* allocate_frag_arr -- attach a fresh frag array to 'res' via *frag_store,
   but only while building with precise exception context, only if the node
   was not optimized away into a different op, and only if no array was set
   yet (cse). */
2081 * allocate the frag array
2083 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2084 if (get_opt_precise_exc_context()) {
2085 if ((current_ir_graph->phase_state == phase_building) &&
2086 (get_irn_op(res) == op) && /* Could be optimized away. */
2087 !*frag_store) /* Could be a cse where the arr is already set. */ {
2088 *frag_store = new_frag_arr(res);
/* ------------------------------------------------------------------ */
/* More new_d_* wrappers.  Fragile ops (Quot/DivMod/Div/Mod/Call/Load/
   Store/Alloc) additionally pin the node and allocate a frag array
   under PRECISE_EXC_CONTEXT.                                          */
/* ------------------------------------------------------------------ */
2095 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2098 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2100 res->attr.except.pin_state = op_pin_state_pinned;
2101 #if PRECISE_EXC_CONTEXT
2102 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2109 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2112 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2114 res->attr.except.pin_state = op_pin_state_pinned;
2115 #if PRECISE_EXC_CONTEXT
2116 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2123 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2126 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2128 res->attr.except.pin_state = op_pin_state_pinned;
2129 #if PRECISE_EXC_CONTEXT
2130 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2137 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2140 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2142 res->attr.except.pin_state = op_pin_state_pinned;
2143 #if PRECISE_EXC_CONTEXT
2144 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2151 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2153 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2158 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2160 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2165 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2167 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2172 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2174 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2179 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2181 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2186 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2188 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2193 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2195 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2200 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2202 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2207 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2209 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2214 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2216 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2221 new_d_Jmp (dbg_info* db)
2223 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2227 new_d_Cond (dbg_info* db, ir_node *c)
2229 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2233 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2237 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2238 store, callee, arity, in, tp);
2239 #if PRECISE_EXC_CONTEXT
2240 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2247 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2249 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2254 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2256 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2261 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2264 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2266 #if PRECISE_EXC_CONTEXT
2267 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2274 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2277 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2279 #if PRECISE_EXC_CONTEXT
2280 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2287 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2291 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2292 store, size, alloc_type, where);
2293 #if PRECISE_EXC_CONTEXT
2294 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2301 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
2302 ir_node *size, type *free_type, where_alloc where)
2304 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2305 store, ptr, size, free_type, where);
/* simpleSel is a Sel without index operands (n_index == 0). */
2309 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2310 /* GL: objptr was called frame before. Frame was a bad choice for the name
2311 as the operand could as well be a pointer to a dynamic object. */
2313 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2314 store, objptr, 0, NULL, ent);
2318 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2320 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2321 store, objptr, n_index, index, sel);
2325 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2327 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2328 store, objptr, ent));
/* SymConsts are placed in the start block, like Consts. */
2332 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2334 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2339 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2341 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2346 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2348 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2356 return _new_d_Bad();
2360 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2362 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2367 new_d_Unknown (ir_mode *m)
2369 return new_rd_Unknown(current_ir_graph, m);
2373 new_d_CallBegin (dbg_info *db, ir_node *call)
2376 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2381 new_d_EndReg (dbg_info *db)
2384 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2389 new_d_EndExcept (dbg_info *db)
2392 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2397 new_d_Break (dbg_info *db)
2399 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2403 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2405 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2412 return _new_d_NoMem();
2416 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2417 ir_node *ir_true, ir_mode *mode) {
2418 return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2419 sel, ir_false, ir_true, mode);
2422 /* ********************************************************************* */
2423 /* Comfortable interface with automatic Phi node construction. */
2424 /* (Uses also constructors of ?? interface, except new_Block. */
2425 /* ********************************************************************* */
2427 /* * Block construction **/
2428 /* immature Block without predecessors */
/* new_d_immBlock -- create an immature Block (predecessors added later via
   add_immBlock_pred, finalized by mature_immBlock) and make it the current
   block.  Also allocates and zeroes the per-block graph_arr used by the
   automatic Phi construction. */
2429 ir_node *new_d_immBlock (dbg_info* db) {
2432 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2433 /* creates a new dynamic in-array as length of in is -1 */
2434 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2435 current_ir_graph->current_block = res;
2436 res->attr.block.matured = 0;
2437 res->attr.block.dead = 0;
2438 /* res->attr.block.exc = exc_normal; */
2439 /* res->attr.block.handler_entry = 0; */
2440 res->attr.block.irg = current_ir_graph;
2441 res->attr.block.backedge = NULL;
2442 res->attr.block.in_cg = NULL;
2443 res->attr.block.cg_backedge = NULL;
2444 set_Block_block_visited(res, 0);
2446 /* Create and initialize array for Phi-node construction. */
2447 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2448 current_ir_graph->n_loc);
2449 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2451 /* Immature block may not be optimized! */
2452 IRN_VRFY_IRG(res, current_ir_graph);
2458 new_immBlock (void) {
2459 return new_d_immBlock(NULL);
/* add an edge to a jmp/control flow node */
2464 add_immBlock_pred (ir_node *block, ir_node *jmp)
2466 if (block->attr.block.matured) {
2467 assert(0 && "Error: Block already matured!\n");
2470 assert(jmp != NULL);
2471 ARR_APP1(ir_node *, block->in, jmp);
2475 /* changing the current block */
2477 set_cur_block (ir_node *target)
2479 current_ir_graph->current_block = target;
2482 /* ************************ */
2483 /* parameter administration */
2485 /* get a value from the parameter array from the current block by its index */
2487 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2489 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2490 inc_irg_visited(current_ir_graph);
2492 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2494 /* get a value from the parameter array from the current block by its index */
2496 get_value (int pos, ir_mode *mode)
2498 return get_d_value(NULL, pos, mode);
2501 /* set a value at position pos in the parameter array from the current block */
2503 set_value (int pos, ir_node *value)
2505 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2506 assert(pos+1 < current_ir_graph->n_loc);
2507 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2510 /* get the current store */
2514 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2515 /* GL: one could call get_value instead */
2516 inc_irg_visited(current_ir_graph);
2517 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2520 /* set the current store */
2522 set_store (ir_node *store)
2524 /* GL: one could call set_value instead */
2525 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2526 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2530 keep_alive (ir_node *ka)
2532 add_End_keepalive(current_ir_graph->end, ka);
2535 /** Useful access routines **/
2536 /* Returns the current block of the current graph. To set the current
2537 block use set_cur_block. */
2538 ir_node *get_cur_block() {
2539 return get_irg_current_block(current_ir_graph);
2542 /* Returns the frame type of the current graph */
2543 type *get_cur_frame_type() {
2544 return get_irg_frame_type(current_ir_graph);
2548 /* ********************************************************************* */
2551 /* call once for each run of the library */
2553 init_cons(uninitialized_local_variable_func_t *func)
2555 default_initialize_local_variable = func;
2558 /* call for each graph */
2560 finalize_cons (ir_graph *irg) {
2561 irg->phase_state = phase_high;
2565 ir_node *new_Block(int arity, ir_node **in) {
2566 return new_d_Block(NULL, arity, in);
2568 ir_node *new_Start (void) {
2569 return new_d_Start(NULL);
2571 ir_node *new_End (void) {
2572 return new_d_End(NULL);
2574 ir_node *new_Jmp (void) {
2575 return new_d_Jmp(NULL);
2577 ir_node *new_Cond (ir_node *c) {
2578 return new_d_Cond(NULL, c);
2580 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2581 return new_d_Return(NULL, store, arity, in);
2583 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2584 return new_d_Raise(NULL, store, obj);
2586 ir_node *new_Const (ir_mode *mode, tarval *con) {
2587 return new_d_Const(NULL, mode, con);
2590 ir_node *new_Const_long(ir_mode *mode, long value)
2592 return new_d_Const_long(NULL, mode, value);
2595 ir_node *new_Const_type(tarval *con, type *tp) {
2596 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2599 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2600 return new_d_SymConst(NULL, value, kind);
2602 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2603 return new_d_simpleSel(NULL, store, objptr, ent);
2605 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2607 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2609 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2610 return new_d_InstOf (NULL, store, objptr, ent);
2612 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2614 return new_d_Call(NULL, store, callee, arity, in, tp);
2616 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2617 return new_d_Add(NULL, op1, op2, mode);
2619 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2620 return new_d_Sub(NULL, op1, op2, mode);
2622 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2623 return new_d_Minus(NULL, op, mode);
2625 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2626 return new_d_Mul(NULL, op1, op2, mode);
2628 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2629 return new_d_Quot(NULL, memop, op1, op2);
2631 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2632 return new_d_DivMod(NULL, memop, op1, op2);
2634 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2635 return new_d_Div(NULL, memop, op1, op2);
2637 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2638 return new_d_Mod(NULL, memop, op1, op2);
2640 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2641 return new_d_Abs(NULL, op, mode);
2643 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2644 return new_d_And(NULL, op1, op2, mode);
2646 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2647 return new_d_Or(NULL, op1, op2, mode);
2649 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2650 return new_d_Eor(NULL, op1, op2, mode);
2652 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2653 return new_d_Not(NULL, op, mode);
2655 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2656 return new_d_Shl(NULL, op, k, mode);
2658 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2659 return new_d_Shr(NULL, op, k, mode);
2661 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2662 return new_d_Shrs(NULL, op, k, mode);
2664 #define new_Rotate new_Rot
2665 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2666 return new_d_Rot(NULL, op, k, mode);
2668 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2669 return new_d_Cmp(NULL, op1, op2);
2671 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2672 return new_d_Conv(NULL, op, mode);
2674 ir_node *new_Cast (ir_node *op, type *to_tp) {
2675 return new_d_Cast(NULL, op, to_tp);
2677 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2678 return new_d_Phi(NULL, arity, in, mode);
2680 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2681 return new_d_Load(NULL, store, addr, mode);
2683 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2684 return new_d_Store(NULL, store, addr, val);
2686 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2687 where_alloc where) {
2688 return new_d_Alloc(NULL, store, size, alloc_type, where);
2690 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2691 type *free_type, where_alloc where) {
2692 return new_d_Free(NULL, store, ptr, size, free_type, where);
2694 ir_node *new_Sync (int arity, ir_node **in) {
2695 return new_d_Sync(NULL, arity, in);
2697 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2698 return new_d_Proj(NULL, arg, mode, proj);
2700 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2701 return new_d_defaultProj(NULL, arg, max_proj);
2703 ir_node *new_Tuple (int arity, ir_node **in) {
2704 return new_d_Tuple(NULL, arity, in);
2706 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2707 return new_d_Id(NULL, val, mode);
2709 ir_node *new_Bad (void) {
2712 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2713 return new_d_Confirm (NULL, val, bound, cmp);
2715 ir_node *new_Unknown(ir_mode *m) {
2716 return new_d_Unknown(m);
2718 ir_node *new_CallBegin (ir_node *callee) {
2719 return new_d_CallBegin(NULL, callee);
2721 ir_node *new_EndReg (void) {
2722 return new_d_EndReg(NULL);
2724 ir_node *new_EndExcept (void) {
2725 return new_d_EndExcept(NULL);
2727 ir_node *new_Break (void) {
2728 return new_d_Break(NULL);
2730 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2731 return new_d_Filter(NULL, arg, mode, proj);
2733 ir_node *new_NoMem (void) {
2734 return new_d_NoMem();
2736 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2737 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);