3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verification */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * language-dependent initialization variable
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* -------------------------------------------- */
66 /* private interfaces, for professional use only */
67 /* -------------------------------------------- */
69 /* Constructs a Block with a fixed number of predecessors.
70 Does not set current_block. Cannot be used with automatic
71 Phi node construction. */
73 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
77 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
78 set_Block_matured(res, 1);
79 set_Block_block_visited(res, 0);
81 /* res->attr.block.exc = exc_normal; */
82 /* res->attr.block.handler_entry = 0; */
83 res->attr.block.dead = 0;
84 res->attr.block.irg = irg;
85 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
86 res->attr.block.in_cg = NULL;
87 res->attr.block.cg_backedge = NULL;
88 res->attr.block.extblk = NULL;
90 IRN_VRFY_IRG(res, irg);
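/* Usage sketch (illustrative only; `irg`, `jmp0` and `jmp1` stand for an
   ir_graph and two already constructed control flow nodes provided by the
   caller):

     ir_node *preds[2] = { jmp0, jmp1 };
     ir_node *blk      = new_rd_Block(NULL, irg, 2, preds);

   The block is created mature and current_block is not touched, so it cannot
   take part in the automatic Phi node construction described further down. */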
95 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
99 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
100 /* res->attr.start.irg = irg; */
102 IRN_VRFY_IRG(res, irg);
107 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
124 bool has_unknown = false;
126 /* Don't assert that block matured: the use of this constructor is strongly
128 if ( get_Block_matured(block) )
129 assert( get_irn_arity(block) == arity );
131 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
133 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
135 for (i = arity-1; i >= 0; i--)
136 if (get_irn_op(in[i]) == op_Unknown) {
141 if (!has_unknown) res = optimize_node (res);
142 IRN_VRFY_IRG(res, irg);
144 /* Memory Phis in endless loops must be kept alive.
145 As we can't distinguish these easily we keep all of them alive. */
146 if ((res->op == op_Phi) && (mode == mode_M))
147 add_End_keepalive(irg->end, res);
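/* Usage sketch (illustrative only): merging two integer values `val0` and
   `val1` that reach the mature two-predecessor block `blk` on its first and
   second control flow edge, respectively. `mode_Is` is the usual signed
   integer mode from the firm mode headers; all other names are placeholders:

     ir_node *ins[2] = { val0, val1 };
     ir_node *phi    = new_rd_Phi(NULL, irg, blk, 2, ins, mode_Is);

   The i-th Phi operand corresponds to the i-th predecessor of the block,
   which is why the block must already have its final arity here. */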
152 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
157 res->attr.con.tv = con;
158 set_Const_type(res, tp); /* Call method because of complex assertion. */
159 res = optimize_node (res);
160 assert(get_Const_type(res) == tp);
161 IRN_VRFY_IRG(res, irg);
167 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
169 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
173 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
175 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
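/* Usage sketch (illustrative only; `irg` and `blk` are placeholders, `mode_Is`
   is the signed integer mode):

     ir_node *c = new_rd_Const_long(NULL, irg, blk, mode_Is, 42);

   Note that the Const constructors above ignore the block argument and place
   the node in the start block of `irg`, so constants are available in the
   whole graph. */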
179 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
183 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
184 res = optimize_node(res);
185 IRN_VRFY_IRG(res, irg);
190 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
195 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
196 res->attr.proj = proj;
199 assert(get_Proj_pred(res));
200 assert(get_nodes_block(get_Proj_pred(res)));
202 res = optimize_node(res);
204 IRN_VRFY_IRG(res, irg);
210 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
214 assert(arg->op == op_Cond);
215 arg->attr.c.kind = fragmentary;
216 arg->attr.c.default_proj = max_proj;
217 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
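/* Usage sketch (illustrative only): Proj nodes select one result out of a
   mode_T node. For a switch-like Cond on an integer selector `sel`, each case
   value gets a Proj with that number and the default case gets the default
   Proj; `irg`, `blk`, `sel` and the case value 3 are placeholders:

     ir_node *cond  = new_rd_Cond(NULL, irg, blk, sel);
     ir_node *case3 = new_rd_Proj(NULL, irg, blk, cond, mode_X, 3);
     ir_node *deflt = new_rd_defaultProj(NULL, irg, blk, cond, 4);

   As the code above shows, the constructor records `max_proj` as the default
   Proj number and marks the Cond as `fragmentary`. */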
222 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
226 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
227 res = optimize_node(res);
228 IRN_VRFY_IRG(res, irg);
233 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
237 assert(is_atomic_type(to_tp));
239 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
240 res->attr.cast.totype = to_tp;
241 res = optimize_node(res);
242 IRN_VRFY_IRG(res, irg);
247 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
251 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
252 res = optimize_node (res);
253 IRN_VRFY_IRG(res, irg);
258 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op1, ir_node *op2, ir_mode *mode)
266 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
267 res = optimize_node(res);
268 IRN_VRFY_IRG(res, irg);
273 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
274 ir_node *op1, ir_node *op2, ir_mode *mode)
281 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
282 res = optimize_node (res);
283 IRN_VRFY_IRG(res, irg);
288 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *op, ir_mode *mode)
293 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
294 res = optimize_node(res);
295 IRN_VRFY_IRG(res, irg);
300 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *op1, ir_node *op2, ir_mode *mode)
308 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
309 res = optimize_node(res);
310 IRN_VRFY_IRG(res, irg);
315 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
325 res = optimize_node(res);
326 IRN_VRFY_IRG(res, irg);
331 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
340 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
347 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *memop, ir_node *op1, ir_node *op2)
356 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);
363 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
364 ir_node *memop, ir_node *op1, ir_node *op2)
372 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
373 res = optimize_node(res);
374 IRN_VRFY_IRG(res, irg);
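/* Usage sketch (illustrative only): the division family (Quot, DivMod, Div,
   Mod) takes a memory operand and produces a mode_T result, from which memory
   and data results are selected with Proj nodes. `mem`, `a`, `b`, `irg` and
   `blk` are placeholders; pn_Div_M is used further down in this file, and the
   corresponding data result number (assumed here to be pn_Div_res) comes from
   the firm headers:

     ir_node *div  = new_rd_Div(NULL, irg, blk, mem, a, b);
     ir_node *memo = new_rd_Proj(NULL, irg, blk, div, mode_M, pn_Div_M);
     ir_node *res  = new_rd_Proj(NULL, irg, blk, div, mode_Is, pn_Div_res);
*/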
379 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
387 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
388 res = optimize_node(res);
389 IRN_VRFY_IRG(res, irg);
394 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
395 ir_node *op1, ir_node *op2, ir_mode *mode)
402 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
403 res = optimize_node(res);
404 IRN_VRFY_IRG(res, irg);
409 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
410 ir_node *op1, ir_node *op2, ir_mode *mode)
417 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
418 res = optimize_node (res);
419 IRN_VRFY_IRG(res, irg);
424 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
425 ir_node *op, ir_mode *mode)
429 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
436 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
437 ir_node *op, ir_node *k, ir_mode *mode)
444 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
445 res = optimize_node(res);
446 IRN_VRFY_IRG(res, irg);
451 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
452 ir_node *op, ir_node *k, ir_mode *mode)
459 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
460 res = optimize_node(res);
461 IRN_VRFY_IRG(res, irg);
466 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
467 ir_node *op, ir_node *k, ir_mode *mode)
474 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
475 res = optimize_node(res);
476 IRN_VRFY_IRG(res, irg);
481 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
482 ir_node *op, ir_node *k, ir_mode *mode)
489 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
490 res = optimize_node(res);
491 IRN_VRFY_IRG(res, irg);
496 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
497 ir_node *op, ir_mode *mode)
501 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
502 res = optimize_node (res);
503 IRN_VRFY_IRG(res, irg);
508 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
509 ir_node *op1, ir_node *op2)
516 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
517 res = optimize_node(res);
518 IRN_VRFY_IRG(res, irg);
523 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
527 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
528 res = optimize_node (res);
529 IRN_VRFY_IRG (res, irg);
534 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
538 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
539 res->attr.c.kind = dense;
540 res->attr.c.default_proj = 0;
541 res = optimize_node (res);
542 IRN_VRFY_IRG(res, irg);
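/* Usage sketch (illustrative only): a two-way branch tests a mode_b value and
   feeds the two mode_X projections into the successor blocks. `cmp_res` is a
   placeholder for a boolean value (e.g. a Proj of a Cmp); pn_Cond_false and
   pn_Cond_true are assumed to be the projection numbers declared for Cond in
   the firm headers:

     ir_node *cond = new_rd_Cond(NULL, irg, blk, cmp_res);
     ir_node *f    = new_rd_Proj(NULL, irg, blk, cond, mode_X, pn_Cond_false);
     ir_node *t    = new_rd_Proj(NULL, irg, blk, cond, mode_X, pn_Cond_true);

   `f` and `t` can then be used as block predecessors, e.g. in new_rd_Block. */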
547 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
548 ir_node *callee, int arity, ir_node **in, type *tp)
555 NEW_ARR_A(ir_node *, r_in, r_arity);
558 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
560 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
562 assert((get_unknown_type() == tp) || is_Method_type(tp));
563 set_Call_type(res, tp);
564 res->attr.call.exc.pin_state = op_pin_state_pinned;
565 res->attr.call.callee_arr = NULL;
566 res = optimize_node(res);
567 IRN_VRFY_IRG(res, irg);
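/* Usage sketch (illustrative only): the Call node keeps the memory state and
   the callee pointer in its first two in-slots and, as the memcpy above shows,
   the actual arguments from in[2] on. `mem`, `fptr`, `arg0`, `arg1` and the
   method type `method_tp` are placeholders for values the caller provides:

     ir_node *args[2] = { arg0, arg1 };
     ir_node *call    = new_rd_Call(NULL, irg, blk, mem, fptr, 2, args, method_tp);

   The results (new memory, return values, exception control flow) are again
   selected from the mode_T Call node with Proj nodes. */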
572 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
573 ir_node *store, int arity, ir_node **in)
580 NEW_ARR_A (ir_node *, r_in, r_arity);
582 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
583 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
584 res = optimize_node(res);
585 IRN_VRFY_IRG(res, irg);
590 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
597 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
598 res = optimize_node(res);
599 IRN_VRFY_IRG(res, irg);
604 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
605 ir_node *store, ir_node *adr, ir_mode *mode)
612 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
613 res->attr.load.exc.pin_state = op_pin_state_pinned;
614 res->attr.load.load_mode = mode;
615 res->attr.load.volatility = volatility_non_volatile;
616 res = optimize_node(res);
617 IRN_VRFY_IRG(res, irg);
622 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
623 ir_node *store, ir_node *adr, ir_node *val)
631 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
632 res->attr.store.exc.pin_state = op_pin_state_pinned;
633 res->attr.store.volatility = volatility_non_volatile;
634 res = optimize_node(res);
635 IRN_VRFY_IRG(res, irg);
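/* Usage sketch (illustrative only): Load and Store both return mode_T tuples,
   so the memory chain is threaded through Proj nodes. `mem`, `addr`, `irg`
   and `blk` are placeholders; pn_Load_M and pn_Store_M appear further down in
   this file, and pn_Load_res is assumed to be the loaded-value projection:

     ir_node *ld    = new_rd_Load(NULL, irg, blk, mem, addr, mode_Is);
     ir_node *ldmem = new_rd_Proj(NULL, irg, blk, ld, mode_M, pn_Load_M);
     ir_node *ldval = new_rd_Proj(NULL, irg, blk, ld, mode_Is, pn_Load_res);
     ir_node *st    = new_rd_Store(NULL, irg, blk, ldmem, addr, ldval);
     ir_node *stmem = new_rd_Proj(NULL, irg, blk, st, mode_M, pn_Store_M);
*/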
640 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
641 ir_node *size, type *alloc_type, where_alloc where)
648 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
649 res->attr.a.exc.pin_state = op_pin_state_pinned;
650 res->attr.a.where = where;
651 res->attr.a.type = alloc_type;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
658 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
659 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
667 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
668 res->attr.f.where = where;
669 res->attr.f.type = free_type;
670 res = optimize_node(res);
671 IRN_VRFY_IRG(res, irg);
676 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
677 int arity, ir_node **in, entity *ent)
683 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
686 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
689 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
690 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
691 res->attr.s.ent = ent;
692 res = optimize_node(res);
693 IRN_VRFY_IRG(res, irg);
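/* Usage sketch (illustrative only): Sel computes the address of an entity,
   e.g. an array element; as the memcpy above shows, the index operands follow
   memory and object pointer in the in-array. `mem`, `arr_ptr`, `idx` and the
   entity `elem_ent` are placeholders the caller provides:

     ir_node *index[1] = { idx };
     ir_node *addr     = new_rd_Sel(NULL, irg, blk, mem, arr_ptr, 1, index, elem_ent);

   For a plain struct member access the index array is empty (arity 0). */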
698 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
699 ir_node *objptr, type *ent)
706 NEW_ARR_A(ir_node *, r_in, r_arity);
710 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
711 res->attr.io.ent = ent;
713 /* res = optimize(res); */
714 IRN_VRFY_IRG(res, irg);
719 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
720 symconst_kind symkind, type *tp) {
724 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
729 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
731 res->attr.i.num = symkind;
732 res->attr.i.sym = value;
735 res = optimize_node(res);
736 IRN_VRFY_IRG(res, irg);
741 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
742 symconst_kind symkind)
744 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
748 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
749 symconst_symbol sym = {(type *)symbol};
750 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
753 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
754 symconst_symbol sym = {(type *)symbol};
755 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
758 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
759 symconst_symbol sym = {symbol};
760 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
763 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
764 symconst_symbol sym = {symbol};
765 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
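/* Usage sketch (illustrative only): the convenience wrappers above fill the
   symconst_symbol union for the four kinds of symbolic constants. Taking the
   address of an entity `ent` (a placeholder) could look like this; like the
   other constants, the result is placed in the start block:

     ir_node *ent_addr = new_rd_SymConst_addr_ent(NULL, irg, ent, firm_unknown_type);
*/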
769 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
773 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
774 res = optimize_node(res);
775 IRN_VRFY_IRG(res, irg);
780 new_rd_Bad (ir_graph *irg)
786 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
788 ir_node *in[2], *res;
792 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
793 res->attr.confirm_cmp = cmp;
794 res = optimize_node (res);
795 IRN_VRFY_IRG(res, irg);
800 new_rd_Unknown (ir_graph *irg, ir_mode *m)
802 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
806 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
811 in[0] = get_Call_ptr(call);
812 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
813 /* res->attr.callbegin.irg = irg; */
814 res->attr.callbegin.call = call;
815 res = optimize_node(res);
816 IRN_VRFY_IRG(res, irg);
821 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
825 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
827 IRN_VRFY_IRG(res, irg);
832 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
836 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
837 irg->end_except = res;
838 IRN_VRFY_IRG (res, irg);
843 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
847 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
848 res = optimize_node(res);
849 IRN_VRFY_IRG(res, irg);
854 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
859 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
860 res->attr.filter.proj = proj;
861 res->attr.filter.in_cg = NULL;
862 res->attr.filter.backedge = NULL;
865 assert(get_Proj_pred(res));
866 assert(get_nodes_block(get_Proj_pred(res)));
868 res = optimize_node(res);
869 IRN_VRFY_IRG(res, irg);
874 new_rd_NoMem (ir_graph *irg) {
879 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
880 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
889 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
892 res = optimize_node(res);
893 IRN_VRFY_IRG(res, irg);
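/* Usage sketch (illustrative only): Mux selects between two values without
   control flow. `sel_b` is a placeholder for a mode_b selector, `a` and `b`
   for two values of the same mode:

     ir_node *pick = new_rd_Mux(NULL, irg, blk, sel_b, a, b, mode_Is);

   The false operand comes before the true operand in the argument list, as in
   the constructor signature above. */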
898 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
899 return new_rd_Block(NULL, irg, arity, in);
901 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
902 return new_rd_Start(NULL, irg, block);
904 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
905 return new_rd_End(NULL, irg, block);
907 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
908 return new_rd_Jmp(NULL, irg, block);
910 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
911 return new_rd_Cond(NULL, irg, block, c);
913 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
914 ir_node *store, int arity, ir_node **in) {
915 return new_rd_Return(NULL, irg, block, store, arity, in);
917 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
918 ir_node *store, ir_node *obj) {
919 return new_rd_Raise(NULL, irg, block, store, obj);
921 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
922 ir_mode *mode, tarval *con) {
923 return new_rd_Const(NULL, irg, block, mode, con);
926 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
927 ir_mode *mode, long value) {
928 return new_rd_Const_long(NULL, irg, block, mode, value);
932 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
933 symconst_symbol value, symconst_kind symkind) {
934 return new_rd_SymConst(NULL, irg, block, value, symkind);
936 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
937 ir_node *objptr, int n_index, ir_node **index,
939 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
941 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
943 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
945 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
946 ir_node *callee, int arity, ir_node **in,
948 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
950 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
951 ir_node *op1, ir_node *op2, ir_mode *mode) {
952 return new_rd_Add(NULL, irg, block, op1, op2, mode);
954 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
955 ir_node *op1, ir_node *op2, ir_mode *mode) {
956 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
958 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
959 ir_node *op, ir_mode *mode) {
960 return new_rd_Minus(NULL, irg, block, op, mode);
962 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
963 ir_node *op1, ir_node *op2, ir_mode *mode) {
964 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
966 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
967 ir_node *memop, ir_node *op1, ir_node *op2) {
968 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
970 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
971 ir_node *memop, ir_node *op1, ir_node *op2) {
972 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
974 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
975 ir_node *memop, ir_node *op1, ir_node *op2) {
976 return new_rd_Div(NULL, irg, block, memop, op1, op2);
978 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
979 ir_node *memop, ir_node *op1, ir_node *op2) {
980 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
982 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
983 ir_node *op, ir_mode *mode) {
984 return new_rd_Abs(NULL, irg, block, op, mode);
986 ir_node *new_r_And (ir_graph *irg, ir_node *block,
987 ir_node *op1, ir_node *op2, ir_mode *mode) {
988 return new_rd_And(NULL, irg, block, op1, op2, mode);
990 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
991 ir_node *op1, ir_node *op2, ir_mode *mode) {
992 return new_rd_Or(NULL, irg, block, op1, op2, mode);
994 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
995 ir_node *op1, ir_node *op2, ir_mode *mode) {
996 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
998 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
999 ir_node *op, ir_mode *mode) {
1000 return new_rd_Not(NULL, irg, block, op, mode);
1002 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1003 ir_node *op1, ir_node *op2) {
1004 return new_rd_Cmp(NULL, irg, block, op1, op2);
1006 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1007 ir_node *op, ir_node *k, ir_mode *mode) {
1008 return new_rd_Shl(NULL, irg, block, op, k, mode);
1010 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1011 ir_node *op, ir_node *k, ir_mode *mode) {
1012 return new_rd_Shr(NULL, irg, block, op, k, mode);
1014 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1015 ir_node *op, ir_node *k, ir_mode *mode) {
1016 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1018 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1019 ir_node *op, ir_node *k, ir_mode *mode) {
1020 return new_rd_Rot(NULL, irg, block, op, k, mode);
1022 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1023 ir_node *op, ir_mode *mode) {
1024 return new_rd_Conv(NULL, irg, block, op, mode);
1026 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1027 return new_rd_Cast(NULL, irg, block, op, to_tp);
1029 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1030 ir_node **in, ir_mode *mode) {
1031 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1033 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1034 ir_node *store, ir_node *adr, ir_mode *mode) {
1035 return new_rd_Load(NULL, irg, block, store, adr, mode);
1037 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1038 ir_node *store, ir_node *adr, ir_node *val) {
1039 return new_rd_Store(NULL, irg, block, store, adr, val);
1041 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1042 ir_node *size, type *alloc_type, where_alloc where) {
1043 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1045 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1046 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1047 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1049 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1050 return new_rd_Sync(NULL, irg, block, arity, in);
1052 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1053 ir_mode *mode, long proj) {
1054 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1056 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1058 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1060 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1061 int arity, ir_node **in) {
1062 return new_rd_Tuple(NULL, irg, block, arity, in );
1064 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1065 ir_node *val, ir_mode *mode) {
1066 return new_rd_Id(NULL, irg, block, val, mode);
1068 ir_node *new_r_Bad (ir_graph *irg) {
1069 return new_rd_Bad(irg);
1071 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1072 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1074 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1075 return new_rd_Unknown(irg, m);
1077 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1078 return new_rd_CallBegin(NULL, irg, block, callee);
1080 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1081 return new_rd_EndReg(NULL, irg, block);
1083 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1084 return new_rd_EndExcept(NULL, irg, block);
1086 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1087 return new_rd_Break(NULL, irg, block);
1089 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1090 ir_mode *mode, long proj) {
1091 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1093 ir_node *new_r_NoMem (ir_graph *irg) {
1094 return new_rd_NoMem(irg);
1096 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1097 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1098 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1102 /** ********************/
1103 /** public interfaces */
1104 /** construction tools */
1108 * - create a new Start node in the current block
1110 * @return s - pointer to the created Start node
1115 new_d_Start (dbg_info* db)
1119 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1120 op_Start, mode_T, 0, NULL);
1121 /* res->attr.start.irg = current_ir_graph; */
1123 res = optimize_node(res);
1124 IRN_VRFY_IRG(res, current_ir_graph);
1129 new_d_End (dbg_info* db)
1132 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1133 op_End, mode_X, -1, NULL);
1134 res = optimize_node(res);
1135 IRN_VRFY_IRG(res, current_ir_graph);
1140 /* Constructs a Block with a fixed number of predecessors.
1141 Does set current_block. Can be used with automatic Phi
1142 node construction. */
1144 new_d_Block (dbg_info* db, int arity, ir_node **in)
1148 bool has_unknown = false;
1150 res = new_rd_Block(db, current_ir_graph, arity, in);
1152 /* Create and initialize array for Phi-node construction. */
1153 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1154 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1155 current_ir_graph->n_loc);
1156 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1159 for (i = arity-1; i >= 0; i--)
1160 if (get_irn_op(in[i]) == op_Unknown) {
1165 if (!has_unknown) res = optimize_node(res);
1166 current_ir_graph->current_block = res;
1168 IRN_VRFY_IRG(res, current_ir_graph);
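/* Usage sketch (illustrative only): the new_d_ constructors implicitly use
   current_ir_graph and its current_block, so after creating a block with
   known predecessors the data nodes can be built without naming the block.
   `jmp0`, `jmp1`, `x` and `y` are placeholders:

     ir_node *preds[2] = { jmp0, jmp1 };
     ir_node *blk      = new_d_Block(NULL, 2, preds);   /* sets current_block */
     ir_node *sum      = new_d_Add(NULL, x, y, mode_Is);

   Because the graph_arr of the block is allocated above, such blocks can also
   take part in the get_value()/set_value() based Phi construction. */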
1173 /* ***********************************************************************/
1174 /* Methods necessary for automatic Phi node creation */
1176 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1177 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1178 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1179 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1181 Call Graph: ( A ---> B == A "calls" B)
1183 get_value mature_immBlock
1191 get_r_value_internal |
1195 new_rd_Phi0 new_rd_Phi_in
1197 * *************************************************************************** */
1199 /** Creates a Phi node with 0 predecessors */
1200 static INLINE ir_node *
1201 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1205 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1206 IRN_VRFY_IRG(res, irg);
1210 /* There are two implementations of the Phi node construction. The first
1211 is faster, but does not work for blocks with more than 2 predecessors.
1212 The second always works but is slower and causes more unnecessary Phi
1214 Select the implementation by the following preprocessor flag set in
1216 #if USE_FAST_PHI_CONSTRUCTION
1218 /* This is a stack used for allocating and deallocating nodes in
1219 new_rd_Phi_in. The original implementation used the obstack
1220 to model this stack; now it is explicit. This reduces side effects.
1222 #if USE_EXPLICIT_PHI_IN_STACK
1224 new_Phi_in_stack(void) {
1227 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1229 res->stack = NEW_ARR_F (ir_node *, 0);
1236 free_Phi_in_stack(Phi_in_stack *s) {
1237 DEL_ARR_F(s->stack);
1241 free_to_Phi_in_stack(ir_node *phi) {
1242 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1243 current_ir_graph->Phi_in_stack->pos)
1244 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1246 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1248 (current_ir_graph->Phi_in_stack->pos)++;
1251 static INLINE ir_node *
1252 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1253 int arity, ir_node **in) {
1255 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1256 int pos = current_ir_graph->Phi_in_stack->pos;
1260 /* We need to allocate a new node */
1261 res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
1262 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1264 /* reuse the old node and initialize it again. */
1267 assert (res->kind == k_ir_node);
1268 assert (res->op == op_Phi);
1272 assert (arity >= 0);
1273 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1274 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1276 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1278 (current_ir_graph->Phi_in_stack->pos)--;
1282 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1284 /* Creates a Phi node with a given, fixed array **in of predecessors.
1285 If the Phi node is unnecessary, as the same value reaches the block
1286 through all control flow paths, it is eliminated and the value
1287 returned directly. This constructor is only intended for use in
1288 the automatic Phi node generation triggered by get_value or mature.
1289 The implementation is quite tricky and depends on the fact, that
1290 the nodes are allocated on a stack:
1291 The in array contains predecessors and NULLs. The NULLs appear,
1292 if get_r_value_internal, that computed the predecessors, reached
1293 the same block on two paths. In this case the same value reaches
1294 this block on both paths, there is no definition in between. We need
1295 not allocate a Phi where these paths merge, but we have to communicate
1296 this fact to the caller. This happens by returning a pointer to the
1297 node the caller _will_ allocate. (Yes, we predict the address. We can
1298 do so because the nodes are allocated on the obstack.) The caller then
1299 finds a pointer to itself and, when this routine is called again,
1302 static INLINE ir_node *
1303 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1306 ir_node *res, *known;
1308 /* Allocate a new node on the obstack. This can return a node to
1309 which some of the pointers in the in-array already point.
1310 Attention: the constructor copies the in array, i.e., the later
1311 changes to the array in this routine do not affect the
1312 constructed node! If the in array contains NULLs, there will be
1313 missing predecessors in the returned node. Is this a possible
1314 internal state of the Phi node generation? */
1315 #if USE_EXPLICIT_PHI_IN_STACK
1316 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1318 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1319 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1322 /* The in-array can contain NULLs. These were returned by
1323 get_r_value_internal if it reached the same block/definition on a
1324 second path. The NULLs are replaced by the node itself to
1325 simplify the test in the next loop. */
1326 for (i = 0; i < ins; ++i) {
1331 /* This loop checks whether the Phi has more than one predecessor.
1332 If so, it is a real Phi node and we break the loop. Else the Phi
1333 node merges the same definition on several paths and therefore is
1335 for (i = 0; i < ins; ++i) {
1336 if (in[i] == res || in[i] == known)
1345 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1347 #if USE_EXPLICIT_PHI_IN_STACK
1348 free_to_Phi_in_stack(res);
1350 edges_node_deleted(res, current_ir_graph);
1351 obstack_free(current_ir_graph->obst, res);
1355 res = optimize_node (res);
1356 IRN_VRFY_IRG(res, irg);
1359 /* return the pointer to the Phi node. This node might be deallocated! */
1364 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1367 allocates and returns this node. The routine called to allocate the
1368 node might optimize it away and return a real value, or even a pointer
1369 to a deallocated Phi node on top of the obstack!
1370 This function is called with an in-array of proper size. **/
1372 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1374 ir_node *prevBlock, *res;
1377 /* This loop goes to all predecessor blocks of the block the Phi node is in
1378 and there finds the operands of the Phi node by calling
1379 get_r_value_internal. */
1380 for (i = 1; i <= ins; ++i) {
1381 assert (block->in[i]);
1382 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1384 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1387 /* After collecting all predecessors into the array nin a new Phi node
1388 with these predecessors is created. This constructor contains an
1389 optimization: If all predecessors of the Phi node are identical it
1390 returns the only operand instead of a new Phi node. If the value
1391 passes two different control flow edges without being defined, and
1392 this is the second path treated, a pointer to the node that will be
1393 allocated for the first path (recursion) is returned. We already
1394 know the address of this node, as it is the next node to be allocated
1395 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1396 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1398 /* Now we know the value for "pos" and can enter it in the array with
1399 all known local variables. Attention: this might be a pointer to
1400 a node that will only be allocated later!!! See new_rd_Phi_in.
1401 If this is called in mature, after some set_value in the same block,
1402 the proper value must not be overwritten:
1404 get_value (makes Phi0, puts it into graph_arr)
1405 set_value (overwrites Phi0 in graph_arr)
1406 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1409 if (!block->attr.block.graph_arr[pos]) {
1410 block->attr.block.graph_arr[pos] = res;
1412 /* printf(" value already computed by %s\n",
1413 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1419 /* This function returns the last definition of a variable. In case
1420 this variable was last defined in a previous block, Phi nodes are
1421 inserted. If the part of the firm graph containing the definition
1422 is not yet constructed, a dummy Phi node is returned. */
1424 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1427 /* There are 4 cases to treat.
1429 1. The block is not mature and we visit it the first time. We can not
1430 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1431 predecessors is returned. This node is added to the linked list (field
1432 "link") of the containing block to be completed when this block is
1433 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1436 2. The value is already known in this block, graph_arr[pos] is set and we
1437 visit the block the first time. We can return the value without
1438 creating any new nodes.
1440 3. The block is mature and we visit it the first time. A Phi node needs
1441 to be created (phi_merge). If the Phi is not needed, as all its
1442 operands are the same value reaching the block through different
1443 paths, it's optimized away and the value itself is returned.
1445 4. The block is mature, and we visit it the second time. Now two
1446 subcases are possible:
1447 * The value was computed completely the last time we were here. This
1448 is the case if there is no loop. We can return the proper value.
1449 * The recursion that visited this node and set the flag did not
1450 return yet. We are computing a value in a loop and need to
1451 break the recursion without knowing the result yet.
1452 @@@ strange case. The straightforward approach would be to create a Phi before
1453 starting the computation of its predecessors. In this case we will
1454 find a Phi here in any case. The problem is that this implementation
1455 only creates a Phi after computing the predecessors, so that it is
1456 hard to compute self references of this Phi. @@@
1457 There is no simple check for the second subcase. Therefore we check
1458 for a second visit and treat all such cases as the second subcase.
1459 Anyway, the basic situation is the same: we reached a block
1460 on two paths without finding a definition of the value: No Phi
1461 nodes are needed on both paths.
1462 We return this information "Two paths, no Phi needed" by a very tricky
1463 implementation that relies on the fact that an obstack is a stack and
1464 will return a node with the same address on different allocations.
1465 Look also at phi_merge and new_rd_phi_in to understand this.
1466 @@@ Unfortunately this does not work, see testprogram
1467 three_cfpred_example.
1471 /* case 4 -- already visited. */
1472 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1474 /* visited the first time */
1475 set_irn_visited(block, get_irg_visited(current_ir_graph));
1477 /* Get the local valid value */
1478 res = block->attr.block.graph_arr[pos];
1480 /* case 2 -- If the value is actually computed, return it. */
1481 if (res) return res;
1483 if (block->attr.block.matured) { /* case 3 */
1485 /* The Phi has the same number of ins as the corresponding block. */
1486 int ins = get_irn_arity(block);
1488 NEW_ARR_A (ir_node *, nin, ins);
1490 /* Phi merge collects the predecessors and then creates a node. */
1491 res = phi_merge (block, pos, mode, nin, ins);
1493 } else { /* case 1 */
1494 /* The block is not mature, we don't know how many ins are needed. A Phi
1495 with zero predecessors is created. Such a Phi node is called Phi0
1496 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1497 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1499 The Phi0 has to remember the pos of its internal value. If the real
1500 Phi is computed, pos is used to update the array with the local
1503 res = new_rd_Phi0 (current_ir_graph, block, mode);
1504 res->attr.phi0_pos = pos;
1505 res->link = block->link;
1509 /* If we get here, the frontend missed a use-before-definition error */
1512 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1513 assert (mode->code >= irm_F && mode->code <= irm_P);
1514 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1515 tarval_mode_null[mode->code]);
1518 /* The local valid value is available now. */
1519 block->attr.block.graph_arr[pos] = res;
1527 it starts the recursion. This causes an Id at the entry of
1528 every block that has no definition of the value! **/
1530 #if USE_EXPLICIT_PHI_IN_STACK
1532 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1533 void free_Phi_in_stack(Phi_in_stack *s) { }
1536 static INLINE ir_node *
1537 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1538 ir_node **in, int ins, ir_node *phi0)
1541 ir_node *res, *known;
1543 /* Allocate a new node on the obstack. The allocation copies the in
1545 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1546 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1548 /* This loop checks whether the Phi has more than one predecessor.
1549 If so, it is a real Phi node and we break the loop. Else the
1550 Phi node merges the same definition on several paths and therefore
1551 is not needed. Don't consider Bad nodes! */
1553 for (i=0; i < ins; ++i)
1557 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1559 /* Optimize self-referencing Phis: we can't properly detect them yet, as
1560 they still refer to the Phi0 they will replace. So replace them right now. */
1561 if (phi0 && in[i] == phi0) in[i] = res;
1563 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1571 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1574 edges_node_deleted(res, current_ir_graph);
1575 obstack_free (current_ir_graph->obst, res);
1576 if (is_Phi(known)) {
1577 /* If pred is a Phi node we want to optimize it: if loops are matured in a bad
1578 order, an enclosing Phi node may become superfluous. */
1579 res = optimize_in_place_2(known);
1581 exchange(known, res);
1587 /* An undefined value, e.g., in unreachable code. */
1591 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1592 IRN_VRFY_IRG(res, irg);
1593 /* Memory Phis in endless loops must be kept alive.
1594 As we can't distinguish these easily we keep all of them alive. */
1595 if ((res->op == op_Phi) && (mode == mode_M))
1596 add_End_keepalive(irg->end, res);
1603 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1605 #if PRECISE_EXC_CONTEXT
1607 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1609 /* Construct a new frag_array for node n.
1610 Copy the content from the current graph_arr of the corresponding block:
1611 this is the current state.
1612 Set ProjM(n) as current memory state.
1613 Furthermore, the last entry in the frag_arr of the current block points to n. This
1614 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1616 static INLINE ir_node ** new_frag_arr (ir_node *n)
1621 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1622 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1623 sizeof(ir_node *)*current_ir_graph->n_loc);
1625 /* turn off optimization before allocating Proj nodes, as res isn't
1627 opt = get_opt_optimize(); set_optimize(0);
1628 /* Here we rely on the fact that all frag ops have Memory as first result! */
1629 if (get_irn_op(n) == op_Call)
1630 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1632 assert((pn_Quot_M == pn_DivMod_M) &&
1633 (pn_Quot_M == pn_Div_M) &&
1634 (pn_Quot_M == pn_Mod_M) &&
1635 (pn_Quot_M == pn_Load_M) &&
1636 (pn_Quot_M == pn_Store_M) &&
1637 (pn_Quot_M == pn_Alloc_M) );
1638 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1642 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1647 * returns the frag_arr from a node
1649 static INLINE ir_node **
1650 get_frag_arr (ir_node *n) {
1651 switch (get_irn_opcode(n)) {
1653 return n->attr.call.exc.frag_arr;
1655 return n->attr.a.exc.frag_arr;
1657 return n->attr.load.exc.frag_arr;
1659 return n->attr.store.exc.frag_arr;
1661 return n->attr.except.frag_arr;
1666 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1668 if (!frag_arr[pos]) frag_arr[pos] = val;
1669 if (frag_arr[current_ir_graph->n_loc - 1]) {
1670 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1671 assert(arr != frag_arr && "Endless recursion detected");
1672 set_frag_value(arr, pos, val);
1677 for (i = 0; i < 1000; ++i) {
1678 if (!frag_arr[pos]) {
1679 frag_arr[pos] = val;
1681 if (frag_arr[current_ir_graph->n_loc - 1]) {
1682 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1688 assert(0 && "potential endless recursion");
1693 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1697 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1699 frag_arr = get_frag_arr(cfOp);
1700 res = frag_arr[pos];
1702 if (block->attr.block.graph_arr[pos]) {
1703 /* There was a set_value after the cfOp and no get_value before that
1704 set_value. We must build a Phi node now. */
1705 if (block->attr.block.matured) {
1706 int ins = get_irn_arity(block);
1708 NEW_ARR_A (ir_node *, nin, ins);
1709 res = phi_merge(block, pos, mode, nin, ins);
1711 res = new_rd_Phi0 (current_ir_graph, block, mode);
1712 res->attr.phi0_pos = pos;
1713 res->link = block->link;
1717 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1718 but this should be better: (remove comment if this works) */
1719 /* It's a Phi, we can write this into all graph_arrs with NULL */
1720 set_frag_value(block->attr.block.graph_arr, pos, res);
1722 res = get_r_value_internal(block, pos, mode);
1723 set_frag_value(block->attr.block.graph_arr, pos, res);
1731 computes the predecessors for the real phi node, and then
1732 allocates and returns this node. The routine called to allocate the
1733 node might optimize it away and return a real value.
1734 This function must be called with an in-array of proper size. **/
1736 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1738 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1741 /* If this block has no value at pos create a Phi0 and remember it
1742 in graph_arr to break recursions.
1743 Else we may not set graph_arr, as a later value is already remembered there. */
1745 if (!block->attr.block.graph_arr[pos]) {
1746 if (block == get_irg_start_block(current_ir_graph)) {
1747 /* Collapsing to Bad tarvals is not a good idea.
1748 So we call a user-supplied routine here that deals with this case as
1749 appropriate for the given language. Unfortunately the only help we can give
1750 here is the position.
1752 Even if all variables are defined before use, it can happen that
1753 we get to the start block, if a cond has been replaced by a tuple
1754 (bad, jmp). In this case we call the function needlessly, eventually
1755 generating a non-existent error.
1756 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1759 if (default_initialize_local_variable)
1760 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1762 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1763 /* We don't need to care about exception ops in the start block.
1764 There are none by definition. */
1765 return block->attr.block.graph_arr[pos];
1767 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1768 block->attr.block.graph_arr[pos] = phi0;
1769 #if PRECISE_EXC_CONTEXT
1770 if (get_opt_precise_exc_context()) {
1771 /* Set graph_arr for fragile ops. Here, too, we should break the recursion.
1772 We could choose a cyclic path through a cfop, but the recursion would
1773 break at some point. */
1774 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1780 /* This loop goes to all predecessor blocks of the block the Phi node
1781 is in and there finds the operands of the Phi node by calling
1782 get_r_value_internal. */
1783 for (i = 1; i <= ins; ++i) {
1784 prevCfOp = skip_Proj(block->in[i]);
1786 if (is_Bad(prevCfOp)) {
1787 /* In case a Cond has been optimized we would get right to the start block
1788 with an invalid definition. */
1789 nin[i-1] = new_Bad();
1792 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1794 if (!is_Bad(prevBlock)) {
1795 #if PRECISE_EXC_CONTEXT
1796 if (get_opt_precise_exc_context() &&
1797 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1798 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1799 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1802 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1804 nin[i-1] = new_Bad();
1808 /* We want to pass the Phi0 node to the constructor: this finds additional
1809 optimization possibilities.
1810 The Phi0 node is either allocated in this function, or it comes from
1811 a former call to get_r_value_internal. In this case we may not yet
1812 exchange phi0, as this is done in mature_immBlock. */
1814 phi0_all = block->attr.block.graph_arr[pos];
1815 if (!((get_irn_op(phi0_all) == op_Phi) &&
1816 (get_irn_arity(phi0_all) == 0) &&
1817 (get_nodes_block(phi0_all) == block)))
1823 /* After collecting all predecessors into the array nin a new Phi node
1824 with these predecessors is created. This constructor contains an
1825 optimization: If all predecessors of the Phi node are identical it
1826 returns the only operand instead of a new Phi node. */
1827 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1829 /* In case we allocated a Phi0 node at the beginning of this procedure,
1830 we need to exchange this Phi0 with the real Phi. */
1832 exchange(phi0, res);
1833 block->attr.block.graph_arr[pos] = res;
1834 /* Don't set_frag_value as it does not overwrite. Doesn't matter, it is
1835 only an optimization. */
1841 /* This function returns the last definition of a variable. In case
1842 this variable was last defined in a previous block, Phi nodes are
1843 inserted. If the part of the firm graph containing the definition
1844 is not yet constructed, a dummy Phi node is returned. */
1846 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1849 /* There are 4 cases to treat.
1851 1. The block is not mature and we visit it the first time. We can not
1852 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1853 predecessors is returned. This node is added to the linked list (field
1854 "link") of the containing block to be completed when this block is
1855 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1858 2. The value is already known in this block, graph_arr[pos] is set and we
1859 visit the block the first time. We can return the value without
1860 creating any new nodes.
1862 3. The block is mature and we visit it the first time. A Phi node needs
1863 to be created (phi_merge). If the Phi is not needed, as all its
1864 operands are the same value reaching the block through different
1865 paths, it's optimized away and the value itself is returned.
1867 4. The block is mature, and we visit it the second time. Now two
1868 subcases are possible:
1869 * The value was computed completely the last time we were here. This
1870 is the case if there is no loop. We can return the proper value.
1871 * The recursion that visited this node and set the flag did not
1872 return yet. We are computing a value in a loop and need to
1873 break the recursion. This case only happens if we visited
1874 the same block with phi_merge before, which inserted a Phi0.
1875 So we return the Phi0.
1878 /* case 4 -- already visited. */
1879 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1880 /* As phi_merge allocates a Phi0, this value is always defined. Here
1881 is the critical difference between the two algorithms. */
1882 assert(block->attr.block.graph_arr[pos]);
1883 return block->attr.block.graph_arr[pos];
1886 /* visited the first time */
1887 set_irn_visited(block, get_irg_visited(current_ir_graph));
1889 /* Get the local valid value */
1890 res = block->attr.block.graph_arr[pos];
1892 /* case 2 -- If the value is actually computed, return it. */
1893 if (res) { return res; };
1895 if (block->attr.block.matured) { /* case 3 */
1897 /* The Phi has the same number of ins as the corresponding block. */
1898 int ins = get_irn_arity(block);
1900 NEW_ARR_A (ir_node *, nin, ins);
1902 /* Phi merge collects the predecessors and then creates a node. */
1903 res = phi_merge (block, pos, mode, nin, ins);
1905 } else { /* case 1 */
1906 /* The block is not mature, we don't know how many ins are needed. A Phi
1907 with zero predecessors is created. Such a Phi node is called Phi0
1908 node. The Phi0 is then added to the list of Phi0 nodes in this block
1909 to be matured by mature_immBlock later.
1910 The Phi0 has to remember the pos of its internal value. If the real
1911 Phi is computed, pos is used to update the array with the local
1913 res = new_rd_Phi0 (current_ir_graph, block, mode);
1914 res->attr.phi0_pos = pos;
1915 res->link = block->link;
1919 /* If we get here, the frontend missed a use-before-definition error */
1922 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1923 assert (mode->code >= irm_F && mode->code <= irm_P);
1924 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1925 get_mode_null(mode));
1928 /* The local valid value is available now. */
1929 block->attr.block.graph_arr[pos] = res;
1934 #endif /* USE_FAST_PHI_CONSTRUCTION */
1936 /* ************************************************************************** */
1938 /** Finalize a Block node, when all control flows are known. */
1939 /** Only Block nodes are acceptable parameters. */
1941 mature_immBlock (ir_node *block)
1948 assert (get_irn_opcode(block) == iro_Block);
1949 /* @@@ should be commented in
1950 assert (!get_Block_matured(block) && "Block already matured"); */
1952 if (!get_Block_matured(block)) {
1953 ins = ARR_LEN (block->in)-1;
1954 /* Fix block parameters */
1955 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1957 /* An array for building the Phi nodes. */
1958 NEW_ARR_A (ir_node *, nin, ins);
1960 /* Traverse a chain of Phi nodes attached to this block and mature
1962 for (n = block->link; n; n=next) {
1963 inc_irg_visited(current_ir_graph);
1965 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1968 block->attr.block.matured = 1;
1970 /* Now, as the block is a finished firm node, we can optimize it.
1971 Since other nodes have been allocated since the block was created
1972 we cannot free the node on the obstack. Therefore we have to call
1974 Unfortunately the optimization does not change a lot, as all allocated
1975 nodes refer to the unoptimized node.
1976 We can call _2, as global cse has no effect on blocks. */
1977 block = optimize_in_place_2(block);
1978 IRN_VRFY_IRG(block, current_ir_graph);
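/* Usage sketch (illustrative only) of the automatic Phi construction that
   this function completes. It assumes the public helpers new_immBlock(),
   add_immBlock_pred(), set_value() and get_value() declared in ircons.h;
   `jmp0`, `jmp1` and `some_val` are placeholders:

     ir_node *b = new_immBlock();          /* block with unknown predecessors */
     add_immBlock_pred(b, jmp0);           /* predecessors become known ...   */
     add_immBlock_pred(b, jmp1);           /* ... one by one                  */
     set_value(2, some_val);               /* local variable no. 2            */
     ir_node *v = get_value(2, mode_Is);   /* may yield a Phi0 placeholder    */
     mature_immBlock(b);                   /* fixes arity, upgrades the Phi0s */

   Until mature_immBlock() is called, get_value() may return Phi0 nodes that
   are linked to the block and replaced by real Phi nodes here. */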
1983 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1985 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1990 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1992 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1997 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
1999 return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
2003 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2005 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2011 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2013 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2018 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2020 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
2025 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2028 assert(arg->op == op_Cond);
2029 arg->attr.c.kind = fragmentary;
2030 arg->attr.c.default_proj = max_proj;
2031 res = new_Proj (arg, mode_X, max_proj);
2036 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2038 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2043 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2045 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2049 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2051 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2056 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2058 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2063 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2065 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2071 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2073 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2078 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2080 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2085 * allocate the frag array
2087 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2088 if (get_opt_precise_exc_context()) {
2089 if ((current_ir_graph->phase_state == phase_building) &&
2090 (get_irn_op(res) == op) && /* Could be optimized away. */
2091 !*frag_store) /* Could be a cse where the arr is already set. */ {
2092 *frag_store = new_frag_arr(res);
ir_node *new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
                     memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
                       memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
                    memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
                    memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
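/* A sketch (kept out of the build) of how a front end consumes one of the
   constructors above, here Quot: result, new memory state and exception
   control flow are all reached through Proj nodes.  The Proj numbers
   pn_Quot_M, pn_Quot_res and pn_Quot_X_except are assumed to follow the
   usual libFirm numbering; handler_block and the helper name are
   illustrative only. */
#if 0
static ir_node *quotient_example(ir_node *a, ir_node *b, ir_node *handler_block) {
  ir_node *mem = get_store();
  ir_node *q   = new_Quot(mem, a, b);
  set_store(new_Proj(q, mode_M, pn_Quot_M));                  /* memory after the division  */
  add_immBlock_pred(handler_block,
                    new_Proj(q, mode_X, pn_Quot_X_except));   /* control flow on exception  */
  return new_Proj(q, get_irn_mode(a), pn_Quot_res);           /* the quotient value         */
}
#endif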
ir_node *new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
                     op1, op2, mode);
}

ir_node *new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
}

ir_node *new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
                     op1, op2, mode);
}

ir_node *new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
                     op, mode);
}

ir_node *new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
                      op, k, mode);
}

ir_node *new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
                     op, mode);
}

ir_node *new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2) {
  return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
                     op1, op2);
}

ir_node *new_d_Jmp (dbg_info* db) {
  return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *new_d_Cond (dbg_info* db, ir_node *c) {
  return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
}
ir_node *new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
                     type *tp) {
  ir_node *res;
  res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
                     store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
ir_node *new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in) {
  return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
                        store, arity, in);
}

ir_node *new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj) {
  return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
                       store, obj);
}

ir_node *new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode) {
  ir_node *res;
  res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
                     store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) {
  ir_node *res;
  res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
                      store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
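/* A minimal usage sketch (kept out of the build): how a front end typically
   combines new_Load() with the current memory state.  The Proj numbers
   pn_Load_M and pn_Load_res are assumed to follow the usual libFirm Proj
   numbering; the helper name load_field_example is illustrative only. */
#if 0
static ir_node *load_field_example(ir_node *addr, ir_mode *mode) {
  ir_node *mem  = get_store();                    /* current memory state        */
  ir_node *load = new_Load(mem, addr, mode);      /* Load produces a tuple       */
  set_store(new_Proj(load, mode_M, pn_Load_M));   /* thread the new memory state */
  return new_Proj(load, mode, pn_Load_res);       /* the loaded value itself     */
}
#endif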
ir_node *new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
                      where_alloc where) {
  ir_node *res;
  res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
                      store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
                     ir_node *size, type *free_type, where_alloc where) {
  return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
                      store, ptr, size, free_type, where);
}

ir_node *new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could just as well be a pointer to a dynamic object. */
{
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
                     store, objptr, 0, NULL, ent);
}

ir_node *new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel) {
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
                     store, objptr, n_index, index, sel);
}

ir_node *new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent) {
  return new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
                        store, objptr, ent);
}
ir_node *new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp) {
  return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
                               value, kind, tp);
}

ir_node *new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind) {
  return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
                          value, kind);
}

ir_node *new_d_Sync (dbg_info* db, int arity, ir_node** in) {
  return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
                      arity, in);
}

ir_node *new_d_Bad (void) {
  return _new_d_Bad();
}

ir_node *new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
                         val, bound, cmp);
}

ir_node *new_d_Unknown (ir_mode *m) {
  return new_rd_Unknown(current_ir_graph, m);
}
ir_node *new_d_CallBegin (dbg_info *db, ir_node *call) {
  ir_node *res;
  res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
  return res;
}

ir_node *new_d_EndReg (dbg_info *db) {
  ir_node *res;
  res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *new_d_EndExcept (dbg_info *db) {
  ir_node *res;
  res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *new_d_Break (dbg_info *db) {
  return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
                        arg, mode, proj);
}

ir_node *new_d_NoMem (void) {
  return _new_d_NoMem();
}

ir_node *new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
                    ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
                     sel, ir_false, ir_true, mode);
}
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Also uses constructors of the ?? interface, except new_Block.)       */
/* ********************************************************************* */
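/* A minimal sketch (kept out of the build) of how a front end drives this
   interface to build the graph for something like "if (p) x = a; else x = b;":
   immature blocks are created, jumps are added as predecessors, blocks are
   matured with mature_immBlock() (defined earlier in this file), and local
   variable slot 0 holds x.  It assumes a current block is already set, that
   the graph was created with enough local variable slots (n_loc), and that
   the raw Cond Proj numbers match pn_Cond_false/pn_Cond_true; all names in
   the sketch are illustrative, not part of this file. */
#if 0
static void if_else_example(ir_node *p, ir_node *a, ir_node *b, ir_mode *mode)
{
  ir_node *cond, *proj_true, *proj_false;
  ir_node *then_blk, *else_blk, *join;
  ir_node *jmp_then, *jmp_else, *x;

  cond       = new_Cond(p);
  proj_true  = new_Proj(cond, mode_X, 1);   /* pn_Cond_true  */
  proj_false = new_Proj(cond, mode_X, 0);   /* pn_Cond_false */

  then_blk = new_immBlock();                /* becomes the current block */
  add_immBlock_pred(then_blk, proj_true);
  set_value(0, a);                          /* x = a */
  jmp_then = new_Jmp();

  else_blk = new_immBlock();
  add_immBlock_pred(else_blk, proj_false);
  set_value(0, b);                          /* x = b */
  jmp_else = new_Jmp();

  join = new_immBlock();
  add_immBlock_pred(join, jmp_then);
  add_immBlock_pred(join, jmp_else);
  mature_immBlock(then_blk);
  mature_immBlock(else_blk);
  mature_immBlock(join);

  x = get_value(0, mode);                   /* a Phi(a, b) is built automatically */
  (void)x;
}
#endif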
/** Block construction **/

/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
  ir_node *res;

  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array, as the length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  res->attr.block.dead        = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize the array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                         current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}
ir_node *new_immBlock (void) {
  return new_d_immBlock(NULL);
}

/* add an edge to a jmp/control flow node */
void add_immBlock_pred (ir_node *block, ir_node *jmp)
{
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}

/* changing the current block */
void set_cur_block (ir_node *target)
{
  current_ir_graph->current_block = target;
}
/* ************************ */
/* parameter administration */

/* get a value from the parameter array of the current block by its index */
ir_node *get_d_value (dbg_info* db, int pos, ir_mode *mode)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);

  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}

/* get a value from the parameter array of the current block by its index */
ir_node *get_value (int pos, ir_mode *mode)
{
  return get_d_value(NULL, pos, mode);
}

/* set a value at position pos in the parameter array of the current block */
void set_value (int pos, ir_node *value)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}

/* get the current store */
ir_node *get_store (void)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
}

/* set the current store */
void set_store (ir_node *store)
{
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
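/* A small sketch (kept out of the build) of how a statement like "x = x + 1"
   maps onto these routines when the front end has assigned local variable x
   to slot 1; the slot number and mode are illustrative assumptions.  Note
   that position 0 of the block's value array holds the memory state (see
   get_store()/set_store() above), which is why set_value()/get_value() add
   one to pos internally. */
#if 0
static void increment_example(void) {
  ir_node *x   = get_value(1, mode_Is);          /* read the current value of x */
  ir_node *one = new_Const_long(mode_Is, 1);
  set_value(1, new_Add(x, one, mode_Is));        /* write the new value back    */
}
#endif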
void keep_alive (ir_node *ka)
{
  add_End_keepalive(current_ir_graph->end, ka);
}

/** Useful access routines **/
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block(void) {
  return get_irg_current_block(current_ir_graph);
}

/* Returns the frame type of the current graph. */
type *get_cur_frame_type(void) {
  return get_irg_frame_type(current_ir_graph);
}
/* ********************************************************************* */

/* call once for each run of the library */
void init_cons(uninitialized_local_variable_func_t *func)
{
  default_initialize_local_variable = func;
}

/* call for each graph */
void irg_finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}

void irp_finalize_cons (void) {
  int i, n_irgs = get_irp_n_irgs();
  for (i = 0; i < n_irgs; i++) {
    irg_finalize_cons(get_irp_irg(i));
  }
  irp->phase_state = phase_high;
}
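/* A sketch (kept out of the build) of the intended call order around graph
   construction: init_cons() once per run of the library, each graph built
   with the constructors above, then irg_finalize_cons()/irp_finalize_cons()
   to mark the graphs as no longer under construction.  The callback is
   assumed to match uninitialized_local_variable_func_t as declared in
   ircons.h; the names used here are illustrative only. */
#if 0
static ir_node *my_uninitialized_local_cb(ir_mode *mode, int pos) {
  /* a value was read before any assignment: hand back Unknown of that mode */
  (void)pos;
  return new_Unknown(mode);
}

static void construction_driver_example(void) {
  init_cons(my_uninitialized_local_cb);
  /* ... create ir_graphs and build them with the constructors above ... */
  irp_finalize_cons();   /* or irg_finalize_cons(irg) per finished graph */
}
#endif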
/* Constructors without explicit debug information: each simply passes NULL
   as the dbg_info argument to the corresponding new_d_* constructor above. */

ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_Const_long(ir_mode *mode, long value) {
  return new_d_Const_long(NULL, mode, value);
}
ir_node *new_Const_type(tarval *con, type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem (void) {
  return new_d_NoMem();
}
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
}