3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
21 # include "ircons_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /* when we need verifying */
45 # define IRN_VRFY_IRG(res, irg)
47 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
51 * language dependant initialization variable
53 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
55 /* -------------------------------------------- */
56 /* privat interfaces, for professional use only */
57 /* -------------------------------------------- */
59 /* Constructs a Block with a fixed number of predecessors.
60 Does not set current_block. Can not be used with automatic
61 Phi node construction. */
63 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
67 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
68 set_Block_matured(res, 1);
69 set_Block_block_visited(res, 0);
71 /* res->attr.block.exc = exc_normal; */
72 /* res->attr.block.handler_entry = 0; */
73 res->attr.block.dead = 0;
74 res->attr.block.irg = irg;
75 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
76 res->attr.block.in_cg = NULL;
77 res->attr.block.cg_backedge = NULL;
79 IRN_VRFY_IRG(res, irg);
84 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
88 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
89 /* res->attr.start.irg = irg; */
91 IRN_VRFY_IRG(res, irg);
96 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
100 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
102 IRN_VRFY_IRG(res, irg);
106 /* Creates a Phi node with all predecessors. Calling this constructor
107 is only allowed if the corresponding block is mature. */
109 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
113 bool has_unknown = false;
115 /* Don't assert that block matured: the use of this constructor is strongly
117 if ( get_Block_matured(block) )
118 assert( get_irn_arity(block) == arity );
120 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
122 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
124 for (i = arity-1; i >= 0; i--)
125 if (get_irn_op(in[i]) == op_Unknown) {
130 if (!has_unknown) res = optimize_node (res);
131 IRN_VRFY_IRG(res, irg);
133 /* Memory Phis in endless loops must be kept alive.
134 As we can't distinguish these easily we keep all of them alive. */
135 if ((res->op == op_Phi) && (mode == mode_M))
136 add_End_keepalive(irg->end, res);
141 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
145 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
146 res->attr.con.tv = con;
147 set_Const_type(res, tp); /* Call method because of complex assertion. */
148 res = optimize_node (res);
149 assert(get_Const_type(res) == tp);
150 IRN_VRFY_IRG(res, irg);
156 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
158 type *tp = unknown_type;
159 /* removing this somehow causes errors in jack. */
160 return new_rd_Const_type (db, irg, block, mode, con, tp);
164 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
168 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
169 res = optimize_node(res);
170 IRN_VRFY_IRG(res, irg);
175 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
180 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
181 res->attr.proj = proj;
184 assert(get_Proj_pred(res));
185 assert(get_nodes_block(get_Proj_pred(res)));
187 res = optimize_node(res);
189 IRN_VRFY_IRG(res, irg);
195 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
199 assert(arg->op == op_Cond);
200 arg->attr.c.kind = fragmentary;
201 arg->attr.c.default_proj = max_proj;
202 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
207 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
211 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
212 res = optimize_node(res);
213 IRN_VRFY_IRG(res, irg);
218 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
222 assert(is_atomic_type(to_tp));
224 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
225 res->attr.cast.totype = to_tp;
226 res = optimize_node(res);
227 IRN_VRFY_IRG(res, irg);
232 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
236 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
237 res = optimize_node (res);
238 IRN_VRFY_IRG(res, irg);
243 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
244 ir_node *op1, ir_node *op2, ir_mode *mode)
251 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
252 res = optimize_node(res);
253 IRN_VRFY_IRG(res, irg);
258 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op1, ir_node *op2, ir_mode *mode)
266 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
267 res = optimize_node (res);
268 IRN_VRFY_IRG(res, irg);
273 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
274 ir_node *op, ir_mode *mode)
278 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
279 res = optimize_node(res);
280 IRN_VRFY_IRG(res, irg);
285 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
286 ir_node *op1, ir_node *op2, ir_mode *mode)
293 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
294 res = optimize_node(res);
295 IRN_VRFY_IRG(res, irg);
300 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *memop, ir_node *op1, ir_node *op2)
309 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
310 res = optimize_node(res);
311 IRN_VRFY_IRG(res, irg);
316 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *memop, ir_node *op1, ir_node *op2)
325 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
326 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
332 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
333 ir_node *memop, ir_node *op1, ir_node *op2)
341 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
342 res = optimize_node(res);
343 IRN_VRFY_IRG(res, irg);
348 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
349 ir_node *memop, ir_node *op1, ir_node *op2)
357 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
358 res = optimize_node(res);
359 IRN_VRFY_IRG(res, irg);
364 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
365 ir_node *op1, ir_node *op2, ir_mode *mode)
372 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
373 res = optimize_node(res);
374 IRN_VRFY_IRG(res, irg);
379 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
387 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
388 res = optimize_node(res);
389 IRN_VRFY_IRG(res, irg);
394 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
395 ir_node *op1, ir_node *op2, ir_mode *mode)
402 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
403 res = optimize_node (res);
404 IRN_VRFY_IRG(res, irg);
409 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
410 ir_node *op, ir_mode *mode)
414 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
415 res = optimize_node(res);
416 IRN_VRFY_IRG(res, irg);
421 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
422 ir_node *op, ir_node *k, ir_mode *mode)
429 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
436 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
437 ir_node *op, ir_node *k, ir_mode *mode)
444 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
445 res = optimize_node(res);
446 IRN_VRFY_IRG(res, irg);
451 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
452 ir_node *op, ir_node *k, ir_mode *mode)
459 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
460 res = optimize_node(res);
461 IRN_VRFY_IRG(res, irg);
466 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
467 ir_node *op, ir_node *k, ir_mode *mode)
474 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
475 res = optimize_node(res);
476 IRN_VRFY_IRG(res, irg);
481 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
482 ir_node *op, ir_mode *mode)
486 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
487 res = optimize_node (res);
488 IRN_VRFY_IRG(res, irg);
493 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
494 ir_node *op1, ir_node *op2)
501 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
502 res = optimize_node(res);
503 IRN_VRFY_IRG(res, irg);
508 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
512 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
513 res = optimize_node (res);
514 IRN_VRFY_IRG (res, irg);
519 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
523 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
524 res->attr.c.kind = dense;
525 res->attr.c.default_proj = 0;
526 res = optimize_node (res);
527 IRN_VRFY_IRG(res, irg);
532 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
533 ir_node *callee, int arity, ir_node **in, type *tp)
540 NEW_ARR_A(ir_node *, r_in, r_arity);
543 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
545 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
547 assert((get_unknown_type() == tp) || is_method_type(tp));
548 set_Call_type(res, tp);
549 res->attr.call.exc.pin_state = op_pin_state_pinned;
550 res->attr.call.callee_arr = NULL;
551 res = optimize_node(res);
552 IRN_VRFY_IRG(res, irg);
557 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
558 ir_node *store, int arity, ir_node **in)
565 NEW_ARR_A (ir_node *, r_in, r_arity);
567 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
568 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
569 res = optimize_node(res);
570 IRN_VRFY_IRG(res, irg);
575 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
582 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
583 res = optimize_node(res);
584 IRN_VRFY_IRG(res, irg);
589 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
590 ir_node *store, ir_node *adr, ir_mode *mode)
597 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
598 res->attr.load.exc.pin_state = op_pin_state_pinned;
599 res->attr.load.load_mode = mode;
600 res->attr.load.volatility = volatility_non_volatile;
601 res = optimize_node(res);
602 IRN_VRFY_IRG(res, irg);
607 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
608 ir_node *store, ir_node *adr, ir_node *val)
616 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
617 res->attr.store.exc.pin_state = op_pin_state_pinned;
618 res->attr.store.volatility = volatility_non_volatile;
619 res = optimize_node(res);
620 IRN_VRFY_IRG(res, irg);
625 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
626 ir_node *size, type *alloc_type, where_alloc where)
633 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
634 res->attr.a.exc.pin_state = op_pin_state_pinned;
635 res->attr.a.where = where;
636 res->attr.a.type = alloc_type;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
644 ir_node *ptr, ir_node *size, type *free_type)
652 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
653 res->attr.f = free_type;
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, irg);
660 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
661 int arity, ir_node **in, entity *ent)
667 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
670 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
673 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
674 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
675 res->attr.s.ent = ent;
676 res = optimize_node(res);
677 IRN_VRFY_IRG(res, irg);
682 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
683 ir_node *objptr, type *ent)
690 NEW_ARR_A(ir_node *, r_in, r_arity);
694 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
695 res->attr.io.ent = ent;
697 /* res = optimize(res); */
698 IRN_VRFY_IRG(res, irg);
703 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
704 symconst_kind symkind, type *tp) {
708 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
713 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
715 res->attr.i.num = symkind;
716 res->attr.i.sym = value;
719 res = optimize_node(res);
720 IRN_VRFY_IRG(res, irg);
725 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
726 symconst_kind symkind)
728 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
732 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
733 symconst_symbol sym = {(type *)symbol};
734 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
737 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
738 symconst_symbol sym = {(type *)symbol};
739 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
742 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
743 symconst_symbol sym = {symbol};
744 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
747 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
748 symconst_symbol sym = {symbol};
749 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
753 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
757 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
764 new_rd_Bad (ir_graph *irg)
770 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
772 ir_node *in[2], *res;
776 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
777 res->attr.confirm_cmp = cmp;
778 res = optimize_node (res);
779 IRN_VRFY_IRG(res, irg);
784 new_rd_Unknown (ir_graph *irg, ir_mode *m)
786 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
790 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
795 in[0] = get_Call_ptr(call);
796 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
797 /* res->attr.callbegin.irg = irg; */
798 res->attr.callbegin.call = call;
799 res = optimize_node(res);
800 IRN_VRFY_IRG(res, irg);
805 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
809 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
811 IRN_VRFY_IRG(res, irg);
816 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
820 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
821 irg->end_except = res;
822 IRN_VRFY_IRG (res, irg);
827 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
831 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
832 res = optimize_node(res);
833 IRN_VRFY_IRG(res, irg);
838 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
843 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
844 res->attr.filter.proj = proj;
845 res->attr.filter.in_cg = NULL;
846 res->attr.filter.backedge = NULL;
849 assert(get_Proj_pred(res));
850 assert(get_nodes_block(get_Proj_pred(res)));
852 res = optimize_node(res);
853 IRN_VRFY_IRG(res, irg);
859 new_rd_NoMem (ir_graph *irg)
865 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
866 return new_rd_Block(NULL, irg, arity, in);
868 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
869 return new_rd_Start(NULL, irg, block);
871 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
872 return new_rd_End(NULL, irg, block);
874 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
875 return new_rd_Jmp(NULL, irg, block);
877 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
878 return new_rd_Cond(NULL, irg, block, c);
880 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
881 ir_node *store, int arity, ir_node **in) {
882 return new_rd_Return(NULL, irg, block, store, arity, in);
884 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
885 ir_node *store, ir_node *obj) {
886 return new_rd_Raise(NULL, irg, block, store, obj);
888 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
889 ir_mode *mode, tarval *con) {
890 return new_rd_Const(NULL, irg, block, mode, con);
892 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
893 symconst_symbol value, symconst_kind symkind) {
894 return new_rd_SymConst(NULL, irg, block, value, symkind);
896 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
897 ir_node *objptr, int n_index, ir_node **index,
899 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
901 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
903 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
905 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
906 ir_node *callee, int arity, ir_node **in,
908 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
910 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
911 ir_node *op1, ir_node *op2, ir_mode *mode) {
912 return new_rd_Add(NULL, irg, block, op1, op2, mode);
914 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
915 ir_node *op1, ir_node *op2, ir_mode *mode) {
916 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
918 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
919 ir_node *op, ir_mode *mode) {
920 return new_rd_Minus(NULL, irg, block, op, mode);
922 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
923 ir_node *op1, ir_node *op2, ir_mode *mode) {
924 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
926 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
927 ir_node *memop, ir_node *op1, ir_node *op2) {
928 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
930 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
931 ir_node *memop, ir_node *op1, ir_node *op2) {
932 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
934 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
935 ir_node *memop, ir_node *op1, ir_node *op2) {
936 return new_rd_Div(NULL, irg, block, memop, op1, op2);
938 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
939 ir_node *memop, ir_node *op1, ir_node *op2) {
940 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
942 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
943 ir_node *op, ir_mode *mode) {
944 return new_rd_Abs(NULL, irg, block, op, mode);
946 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
947 ir_node *op1, ir_node *op2, ir_mode *mode) {
948 return new_rd_And(NULL, irg, block, op1, op2, mode);
950 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
951 ir_node *op1, ir_node *op2, ir_mode *mode) {
952 return new_rd_Or(NULL, irg, block, op1, op2, mode);
954 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
955 ir_node *op1, ir_node *op2, ir_mode *mode) {
956 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
958 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
959 ir_node *op, ir_mode *mode) {
960 return new_rd_Not(NULL, irg, block, op, mode);
962 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
963 ir_node *op1, ir_node *op2) {
964 return new_rd_Cmp(NULL, irg, block, op1, op2);
966 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
967 ir_node *op, ir_node *k, ir_mode *mode) {
968 return new_rd_Shl(NULL, irg, block, op, k, mode);
970 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
971 ir_node *op, ir_node *k, ir_mode *mode) {
972 return new_rd_Shr(NULL, irg, block, op, k, mode);
974 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
975 ir_node *op, ir_node *k, ir_mode *mode) {
976 return new_rd_Shrs(NULL, irg, block, op, k, mode);
978 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
979 ir_node *op, ir_node *k, ir_mode *mode) {
980 return new_rd_Rot(NULL, irg, block, op, k, mode);
982 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
983 ir_node *op, ir_mode *mode) {
984 return new_rd_Conv(NULL, irg, block, op, mode);
986 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
987 return new_rd_Cast(NULL, irg, block, op, to_tp);
989 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
990 ir_node **in, ir_mode *mode) {
991 return new_rd_Phi(NULL, irg, block, arity, in, mode);
993 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
994 ir_node *store, ir_node *adr, ir_mode *mode) {
995 return new_rd_Load(NULL, irg, block, store, adr, mode);
997 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
998 ir_node *store, ir_node *adr, ir_node *val) {
999 return new_rd_Store(NULL, irg, block, store, adr, val);
1001 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1002 ir_node *size, type *alloc_type, where_alloc where) {
1003 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1005 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1006 ir_node *ptr, ir_node *size, type *free_type) {
1007 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
1009 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1010 return new_rd_Sync(NULL, irg, block, arity, in);
1012 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1013 ir_mode *mode, long proj) {
1014 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1016 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1018 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1020 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1021 int arity, ir_node **in) {
1022 return new_rd_Tuple(NULL, irg, block, arity, in );
1024 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1025 ir_node *val, ir_mode *mode) {
1026 return new_rd_Id(NULL, irg, block, val, mode);
1028 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1029 return new_rd_Bad(irg);
1031 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1032 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1034 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1035 return new_rd_Unknown(irg, m);
1037 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1038 return new_rd_CallBegin(NULL, irg, block, callee);
1040 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1041 return new_rd_EndReg(NULL, irg, block);
1043 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1044 return new_rd_EndExcept(NULL, irg, block);
1046 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1047 return new_rd_Break(NULL, irg, block);
1049 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1050 ir_mode *mode, long proj) {
1051 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1053 INLINE ir_node *new_r_NoMem (ir_graph *irg) {
1054 return new_rd_NoMem(irg);
1058 /** ********************/
1059 /** public interfaces */
1060 /** construction tools */
1064 * - create a new Start node in the current block
1066 * @return s - pointer to the created Start node
1071 new_d_Start (dbg_info* db)
1075 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1076 op_Start, mode_T, 0, NULL);
1077 /* res->attr.start.irg = current_ir_graph; */
1079 res = optimize_node(res);
1080 IRN_VRFY_IRG(res, current_ir_graph);
1085 new_d_End (dbg_info* db)
1088 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1089 op_End, mode_X, -1, NULL);
1090 res = optimize_node(res);
1091 IRN_VRFY_IRG(res, current_ir_graph);
1096 /* Constructs a Block with a fixed number of predecessors.
1097 Does set current_block. Can be used with automatic Phi
1098 node construction. */
1100 new_d_Block (dbg_info* db, int arity, ir_node **in)
1104 bool has_unknown = false;
1106 res = new_rd_Block(db, current_ir_graph, arity, in);
1108 /* Create and initialize array for Phi-node construction. */
1109 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1110 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1111 current_ir_graph->n_loc);
1112 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1115 for (i = arity-1; i >= 0; i--)
1116 if (get_irn_op(in[i]) == op_Unknown) {
1121 if (!has_unknown) res = optimize_node(res);
1122 current_ir_graph->current_block = res;
1124 IRN_VRFY_IRG(res, current_ir_graph);
1129 /* ***********************************************************************/
1130 /* Methods necessary for automatic Phi node creation */
1132 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1133 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1134 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1135 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1137 Call Graph: ( A ---> B == A "calls" B)
1139 get_value mature_immBlock
1147 get_r_value_internal |
1151 new_rd_Phi0 new_rd_Phi_in
1153 * *************************************************************************** */
1155 /** Creates a Phi node with 0 predecessors */
1156 static INLINE ir_node *
1157 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1161 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1162 IRN_VRFY_IRG(res, irg);
1166 /* There are two implementations of the Phi node construction. The first
1167 is faster, but does not work for blocks with more than 2 predecessors.
1168 The second works always but is slower and causes more unnecessary Phi
1170 Select the implementations by the following preprocessor flag set in
1172 #if USE_FAST_PHI_CONSTRUCTION
1174 /* This is a stack used for allocating and deallocating nodes in
1175 new_rd_Phi_in. The original implementation used the obstack
1176 to model this stack, now it is explicit. This reduces side effects.
1178 #if USE_EXPLICIT_PHI_IN_STACK
1179 INLINE Phi_in_stack *
1180 new_Phi_in_stack(void) {
1183 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1185 res->stack = NEW_ARR_F (ir_node *, 0);
1192 free_Phi_in_stack(Phi_in_stack *s) {
1193 DEL_ARR_F(s->stack);
1197 free_to_Phi_in_stack(ir_node *phi) {
1198 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1199 current_ir_graph->Phi_in_stack->pos)
1200 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1202 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1204 (current_ir_graph->Phi_in_stack->pos)++;
1207 static INLINE ir_node *
1208 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1209 int arity, ir_node **in) {
1211 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1212 int pos = current_ir_graph->Phi_in_stack->pos;
1216 /* We need to allocate a new node */
1217 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1218 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1220 /* reuse the old node and initialize it again. */
1223 assert (res->kind == k_ir_node);
1224 assert (res->op == op_Phi);
1228 assert (arity >= 0);
1229 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1230 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1232 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1234 (current_ir_graph->Phi_in_stack->pos)--;
1238 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1240 /* Creates a Phi node with a given, fixed array **in of predecessors.
1241 If the Phi node is unnecessary, as the same value reaches the block
1242 through all control flow paths, it is eliminated and the value
1243 returned directly. This constructor is only intended for use in
1244 the automatic Phi node generation triggered by get_value or mature.
1245 The implementation is quite tricky and depends on the fact, that
1246 the nodes are allocated on a stack:
1247 The in array contains predecessors and NULLs. The NULLs appear,
1248 if get_r_value_internal, that computed the predecessors, reached
1249 the same block on two paths. In this case the same value reaches
1250 this block on both paths, there is no definition in between. We need
1251 not allocate a Phi where these path's merge, but we have to communicate
1252 this fact to the caller. This happens by returning a pointer to the
1253 node the caller _will_ allocate. (Yes, we predict the address. We can
1254 do so because the nodes are allocated on the obstack.) The caller then
1255 finds a pointer to itself and, when this routine is called again,
1258 static INLINE ir_node *
1259 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1262 ir_node *res, *known;
1264 /* Allocate a new node on the obstack. This can return a node to
1265 which some of the pointers in the in-array already point.
1266 Attention: the constructor copies the in array, i.e., the later
1267 changes to the array in this routine do not affect the
1268 constructed node! If the in array contains NULLs, there will be
1269 missing predecessors in the returned node. Is this a possible
1270 internal state of the Phi node generation? */
1271 #if USE_EXPLICIT_PHI_IN_STACK
1272 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1274 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1275 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1278 /* The in-array can contain NULLs. These were returned by
1279 get_r_value_internal if it reached the same block/definition on a
1280 second path. The NULLs are replaced by the node itself to
1281 simplify the test in the next loop. */
1282 for (i = 0; i < ins; ++i) {
1287 /* This loop checks whether the Phi has more than one predecessor.
1288 If so, it is a real Phi node and we break the loop. Else the Phi
1289 node merges the same definition on several paths and therefore is
1291 for (i = 0; i < ins; ++i)
1293 if (in[i] == res || in[i] == known) continue;
1301 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1303 #if USE_EXPLICIT_PHI_IN_STACK
1304 free_to_Phi_in_stack(res);
1306 obstack_free (current_ir_graph->obst, res);
1310 res = optimize_node (res);
1311 IRN_VRFY_IRG(res, irg);
1314 /* return the pointer to the Phi node. This node might be deallocated! */
1319 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1322 allocates and returns this node. The routine called to allocate the
1323 node might optimize it away and return a real value, or even a pointer
1324 to a deallocated Phi node on top of the obstack!
1325 This function is called with an in-array of proper size. **/
/* phi_merge (simple variant): for value slot "pos", fetch the definition
   reaching the end of every predecessor of "block" and merge them through
   a Phi node.  "nin" is caller-allocated scratch of length "ins" (the
   block's arity).  NOTE(review): this listing elides lines (return type,
   closing braces); the code below is kept byte-identical. */
1327 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1329 ir_node *prevBlock, *res;
1332 /* This loop goes to all predecessor blocks of the block the Phi node is in
1333 and there finds the operands of the Phi node by calling
1334 get_r_value_internal. */
1335 for (i = 1; i <= ins; ++i) {
1336 assert (block->in[i]);
1337 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1339 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1342 /* After collecting all predecessors into the array nin a new Phi node
1343 with these predecessors is created. This constructor contains an
1344 optimization: If all predecessors of the Phi node are identical it
1345 returns the only operand instead of a new Phi node. If the value
1346 passes two different control flow edges without being defined, and
1347 this is the second path treated, a pointer to the node that will be
1348 allocated for the first path (recursion) is returned. We already
1349 know the address of this node, as it is the next node to be allocated
1350 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1351 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1353 /* Now we know the value for "pos" and can enter it in the array with
1354 all known local variables. Attention: this might be a pointer to
1355 a node, that later will be allocated!!! See new_rd_Phi_in.
1356 If this is called in mature, after some set_value in the same block,
1357 the proper value must not be overwritten:
1359 get_value (makes Phi0, puts it into graph_arr)
1360 set_value (overwrites Phi0 in graph_arr)
1361 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1364 if (!block->attr.block.graph_arr[pos]) {
1365 block->attr.block.graph_arr[pos] = res;
1367 /* printf(" value already computed by %s\n",
1368 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
/* NOTE(review): variant of get_r_value_internal used when
   USE_FAST_PHI_CONSTRUCTION is not... presumably selected by the elided
   preprocessor conditionals above — TODO confirm against the full file.
   Lines are elided in this listing; code kept byte-identical. */
1374 /* This function returns the last definition of a variable. In case
1375 this variable was last defined in a previous block, Phi nodes are
1376 inserted. If the part of the firm graph containing the definition
1377 is not yet constructed, a dummy Phi node is returned. */
1379 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1382 /* There are 4 cases to treat.
1384 1. The block is not mature and we visit it the first time. We can not
1385 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1386 predecessors is returned. This node is added to the linked list (field
1387 "link") of the containing block to be completed when this block is
1388 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1391 2. The value is already known in this block, graph_arr[pos] is set and we
1392 visit the block the first time. We can return the value without
1393 creating any new nodes.
1395 3. The block is mature and we visit it the first time. A Phi node needs
1396 to be created (phi_merge). If the Phi is not needed, as all its
1397 operands are the same value reaching the block through different
1398 paths, it's optimized away and the value itself is returned.
1400 4. The block is mature, and we visit it the second time. Now two
1401 subcases are possible:
1402 * The value was computed completely the last time we were here. This
1403 is the case if there is no loop. We can return the proper value.
1404 * The recursion that visited this node and set the flag did not
1405 return yet. We are computing a value in a loop and need to
1406 break the recursion without knowing the result yet.
1407 @@@ strange case. Straight forward we would create a Phi before
1408 starting the computation of it's predecessors. In this case we will
1409 find a Phi here in any case. The problem is that this implementation
1410 only creates a Phi after computing the predecessors, so that it is
1411 hard to compute self references of this Phi. @@@
1412 There is no simple check for the second subcase. Therefore we check
1413 for a second visit and treat all such cases as the second subcase.
1414 Anyways, the basic situation is the same: we reached a block
1415 on two paths without finding a definition of the value: No Phi
1416 nodes are needed on both paths.
1417 We return this information "Two paths, no Phi needed" by a very tricky
1418 implementation that relies on the fact that an obstack is a stack and
1419 will return a node with the same address on different allocations.
1420 Look also at phi_merge and new_rd_phi_in to understand this.
1421 @@@ Unfortunately this does not work, see testprogram
1422 three_cfpred_example.
1426 /* case 4 -- already visited. */
1427 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1429 /* visited the first time */
1430 set_irn_visited(block, get_irg_visited(current_ir_graph));
1432 /* Get the local valid value */
1433 res = block->attr.block.graph_arr[pos];
1435 /* case 2 -- If the value is actually computed, return it. */
1436 if (res) return res;
1438 if (block->attr.block.matured) { /* case 3 */
1440 /* The Phi has the same amount of ins as the corresponding block. */
1441 int ins = get_irn_arity(block);
1443 NEW_ARR_A (ir_node *, nin, ins);
1445 /* Phi merge collects the predecessors and then creates a node. */
1446 res = phi_merge (block, pos, mode, nin, ins);
1448 } else { /* case 1 */
1449 /* The block is not mature, we don't know how many in's are needed. A Phi
1450 with zero predecessors is created. Such a Phi node is called Phi0
1451 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1452 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1454 The Phi0 has to remember the pos of its internal value. If the real
1455 Phi is computed, pos is used to update the array with the local
1458 res = new_rd_Phi0 (current_ir_graph, block, mode);
1459 res->attr.phi0_pos = pos;
1460 res->link = block->link;
1464 /* If we get here, the frontend missed a use-before-definition error */
1467 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1468 assert (mode->code >= irm_F && mode->code <= irm_P);
1469 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1470 tarval_mode_null[mode->code]);
1473 /* The local valid value is available now. */
1474 block->attr.block.graph_arr[pos] = res;
1482    it starts the recursion. This causes an Id at the entry of
1483    every block that has no definition of the value! **/
/* NOTE(review): stub Phi_in_stack helpers for the non-stack configuration,
   followed by the second new_rd_Phi_in variant, which additionally takes
   the Phi0 node ("phi0") this Phi will replace.  Lines are elided in this
   listing; code kept byte-identical. */
1485 #if USE_EXPLICIT_PHI_IN_STACK
1487 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1488 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1491 static INLINE ir_node *
1492 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1493 ir_node **in, int ins, ir_node *phi0)
1496 ir_node *res, *known;
1498 /* Allocate a new node on the obstack. The allocation copies the in
1500 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1501 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1503 /* This loop checks whether the Phi has more than one predecessor.
1504 If so, it is a real Phi node and we break the loop. Else the
1505 Phi node merges the same definition on several paths and therefore
1506 is not needed. Don't consider Bad nodes! */
1508 for (i=0; i < ins; ++i)
1512 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1514 /* Optimize self referencing Phis: We can't detect them yet properly, as
1515 they still refer to the Phi0 they will replace. So replace right now. */
1516 if (phi0 && in[i] == phi0) in[i] = res;
1518 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1526 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1529 obstack_free (current_ir_graph->obst, res);
1530 if (is_Phi(known)) {
1531 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1532 order, an enclosing Phi node may get superfluous. */
1533 res = optimize_in_place_2(known);
1534 if (res != known) { exchange(known, res); }
1539 /* An undefined value, e.g., in unreachable code. */
1543 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1544 IRN_VRFY_IRG(res, irg);
1545 /* Memory Phis in endless loops must be kept alive.
1546 As we can't distinguish these easily we keep all of them alive. */
1547 if ((res->op == op_Phi) && (mode == mode_M))
1548 add_End_keepalive(irg->end, res);
1555 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1557 #if PRECISE_EXC_CONTEXT
1559 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1561 /* Construct a new frag_array for node n.
1562 Copy the content from the current graph_arr of the corresponding block:
1563 this is the current state.
1564 Set ProjM(n) as current memory state.
1565 Further the last entry in frag_arr of current block points to n. This
1566 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* new_frag_arr: snapshot of the current block's value array taken at a
   fragile (possibly-excepting) op, so exception successors see the state
   valid at that op.  NOTE(review): lines elided; code kept byte-identical. */
1568 static INLINE ir_node ** new_frag_arr (ir_node *n)
1573 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1574 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1575 sizeof(ir_node *)*current_ir_graph->n_loc);
1577 /* turn off optimization before allocating Proj nodes, as res isn't
1579 opt = get_opt_optimize(); set_optimize(0);
1580 /* Here we rely on the fact that all frag ops have Memory as first result! */
1581 if (get_irn_op(n) == op_Call)
1582 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1584 assert((pn_Quot_M == pn_DivMod_M) &&
1585 (pn_Quot_M == pn_Div_M) &&
1586 (pn_Quot_M == pn_Mod_M) &&
1587 (pn_Quot_M == pn_Load_M) &&
1588 (pn_Quot_M == pn_Store_M) &&
1589 (pn_Quot_M == pn_Alloc_M) );
1590 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1594 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1599  * returns the frag_arr from a node
/* NOTE(review): the switch's case labels are elided in this listing; by
   attribute union member the returns presumably correspond to Call, Alloc,
   Load, Store and the generic except attribute respectively — TODO confirm
   against the full file. Code kept byte-identical. */
1601 static INLINE ir_node **
1602 get_frag_arr (ir_node *n) {
1603 switch (get_irn_opcode(n)) {
1605 return n->attr.call.exc.frag_arr;
1607 return n->attr.a.exc.frag_arr;
1609 return n->attr.load.exc.frag_arr;
1611 return n->attr.store.exc.frag_arr;
1613 return n->attr.except.frag_arr;
/* set_frag_value: record "val" for slot "pos" in a frag_arr without
   overwriting an existing entry, then propagate along the chain of frag
   ops (last array slot links to the next fragile op's frag_arr).
   NOTE(review): this listing interleaves two elided variants of the body —
   a recursive one and a bounded-loop one; code kept byte-identical. */
1618 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1620 if (!frag_arr[pos]) frag_arr[pos] = val;
1621 if (frag_arr[current_ir_graph->n_loc - 1]) {
1622 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1623 assert(arr != frag_arr && "Endless recursion detected");
1624 set_frag_value(arr, pos, val);
/* Loop variant: iteration capped at 1000 links instead of recursing. */
1629 for (i = 0; i < 1000; ++i) {
1630 if (!frag_arr[pos]) {
1631 frag_arr[pos] = val;
1633 if (frag_arr[current_ir_graph->n_loc - 1]) {
1634 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1640 assert(0 && "potential endless recursion");
/* get_r_frag_value_internal: like get_r_value_internal, but reads the value
   valid at fragile op "cfOp" (from its frag_arr snapshot) rather than at the
   end of "block".  NOTE(review): lines elided; code kept byte-identical. */
1645 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1649 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1651 frag_arr = get_frag_arr(cfOp);
1652 res = frag_arr[pos];
1654 if (block->attr.block.graph_arr[pos]) {
1655 /* There was a set_value after the cfOp and no get_value before that
1656 set_value. We must build a Phi node now. */
1657 if (block->attr.block.matured) {
1658 int ins = get_irn_arity(block);
1660 NEW_ARR_A (ir_node *, nin, ins);
1661 res = phi_merge(block, pos, mode, nin, ins);
1663 res = new_rd_Phi0 (current_ir_graph, block, mode);
1664 res->attr.phi0_pos = pos;
1665 res->link = block->link;
1669 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1670 but this should be better: (remove comment if this works) */
1671 /* It's a Phi, we can write this into all graph_arrs with NULL */
1672 set_frag_value(block->attr.block.graph_arr, pos, res);
1674 res = get_r_value_internal(block, pos, mode);
1675 set_frag_value(block->attr.block.graph_arr, pos, res);
1683    computes the predecessors for the real phi node, and then
1684    allocates and returns this node. The routine called to allocate the
1685    node might optimize it away and return a real value.
1686    This function must be called with an in-array of proper size. **/
/* phi_merge (Phi0-based variant): first plants a Phi0 in graph_arr[pos] to
   break recursion, then collects the reaching definitions from all
   predecessors and builds the real Phi.
   NOTE(review): lines elided; code kept byte-identical. */
1688 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1690 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1693 /* If this block has no value at pos create a Phi0 and remember it
1694 in graph_arr to break recursions.
1695 Else we may not set graph_arr as a later value is remembered there. */
1697 if (!block->attr.block.graph_arr[pos]) {
1698 if (block == get_irg_start_block(current_ir_graph)) {
1699 /* Collapsing to Bad tarvals is no good idea.
1700 So we call a user-supplied routine here that deals with this case as
1701 appropriate for the given language. Sadly the only help we can give
1702 here is the position.
1704 Even if all variables are defined before use, it can happen that
1705 we get to the start block, if a cond has been replaced by a tuple
1706 (bad, jmp). In this case we call the function needlessly, eventually
1707 generating a non-existent error.
1708 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1711 if (default_initialize_local_variable)
1712 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
1714 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1715 /* We don't need to care about exception ops in the start block.
1716 There are none by definition. */
1717 return block->attr.block.graph_arr[pos];
1719 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1720 block->attr.block.graph_arr[pos] = phi0;
1721 #if PRECISE_EXC_CONTEXT
1722 if (get_opt_precise_exc_context()) {
1723 /* Set graph_arr for fragile ops. Also here we should break recursion.
1724 We could choose a cyclic path through a cfop. But the recursion would
1725 break at some point. */
1726 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1732 /* This loop goes to all predecessor blocks of the block the Phi node
1733 is in and there finds the operands of the Phi node by calling
1734 get_r_value_internal. */
1735 for (i = 1; i <= ins; ++i) {
1736 prevCfOp = skip_Proj(block->in[i]);
1738 if (is_Bad(prevCfOp)) {
1739 /* In case a Cond has been optimized we would get right to the start block
1740 with an invalid definition. */
1741 nin[i-1] = new_Bad();
1744 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1746 if (!is_Bad(prevBlock)) {
1747 #if PRECISE_EXC_CONTEXT
1748 if (get_opt_precise_exc_context() &&
1749 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1750 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1751 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1754 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1756 nin[i-1] = new_Bad();
1760 /* We want to pass the Phi0 node to the constructor: this finds additional
1761 optimization possibilities.
1762 The Phi0 node either is allocated in this function, or it comes from
1763 a former call to get_r_value_internal. In this case we may not yet
1764 exchange phi0, as this is done in mature_immBlock. */
1766 phi0_all = block->attr.block.graph_arr[pos];
1767 if (!((get_irn_op(phi0_all) == op_Phi) &&
1768 (get_irn_arity(phi0_all) == 0) &&
1769 (get_nodes_block(phi0_all) == block)))
1775 /* After collecting all predecessors into the array nin a new Phi node
1776 with these predecessors is created. This constructor contains an
1777 optimization: If all predecessors of the Phi node are identical it
1778 returns the only operand instead of a new Phi node. */
1779 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1781 /* In case we allocated a Phi0 node at the beginning of this procedure,
1782 we need to exchange this Phi0 with the real Phi. */
1784 exchange(phi0, res);
1785 block->attr.block.graph_arr[pos] = res;
1786 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1787 only an optimization. */
/* NOTE(review): second variant of get_r_value_internal, paired with the
   Phi0-planting phi_merge above; here a repeat visit always finds a value
   in graph_arr, so the obstack-address trick of the first variant is not
   needed.  Lines elided in this listing; code kept byte-identical. */
1793 /* This function returns the last definition of a variable. In case
1794 this variable was last defined in a previous block, Phi nodes are
1795 inserted. If the part of the firm graph containing the definition
1796 is not yet constructed, a dummy Phi node is returned. */
1798 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1801 /* There are 4 cases to treat.
1803 1. The block is not mature and we visit it the first time. We can not
1804 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1805 predecessors is returned. This node is added to the linked list (field
1806 "link") of the containing block to be completed when this block is
1807 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1810 2. The value is already known in this block, graph_arr[pos] is set and we
1811 visit the block the first time. We can return the value without
1812 creating any new nodes.
1814 3. The block is mature and we visit it the first time. A Phi node needs
1815 to be created (phi_merge). If the Phi is not needed, as all its
1816 operands are the same value reaching the block through different
1817 paths, it's optimized away and the value itself is returned.
1819 4. The block is mature, and we visit it the second time. Now two
1820 subcases are possible:
1821 * The value was computed completely the last time we were here. This
1822 is the case if there is no loop. We can return the proper value.
1823 * The recursion that visited this node and set the flag did not
1824 return yet. We are computing a value in a loop and need to
1825 break the recursion. This case only happens if we visited
1826 the same block with phi_merge before, which inserted a Phi0.
1827 So we return the Phi0.
1830 /* case 4 -- already visited. */
1831 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1832 /* As phi_merge allocates a Phi0 this value is always defined. Here
1833 is the critical difference of the two algorithms. */
1834 assert(block->attr.block.graph_arr[pos]);
1835 return block->attr.block.graph_arr[pos];
1838 /* visited the first time */
1839 set_irn_visited(block, get_irg_visited(current_ir_graph));
1841 /* Get the local valid value */
1842 res = block->attr.block.graph_arr[pos];
1844 /* case 2 -- If the value is actually computed, return it. */
1845 if (res) { return res; };
1847 if (block->attr.block.matured) { /* case 3 */
1849 /* The Phi has the same amount of ins as the corresponding block. */
1850 int ins = get_irn_arity(block);
1852 NEW_ARR_A (ir_node *, nin, ins);
1854 /* Phi merge collects the predecessors and then creates a node. */
1855 res = phi_merge (block, pos, mode, nin, ins);
1857 } else { /* case 1 */
1858 /* The block is not mature, we don't know how many in's are needed. A Phi
1859 with zero predecessors is created. Such a Phi node is called Phi0
1860 node. The Phi0 is then added to the list of Phi0 nodes in this block
1861 to be matured by mature_immBlock later.
1862 The Phi0 has to remember the pos of its internal value. If the real
1863 Phi is computed, pos is used to update the array with the local
1865 res = new_rd_Phi0 (current_ir_graph, block, mode);
1866 res->attr.phi0_pos = pos;
1867 res->link = block->link;
1871 /* If we get here, the frontend missed a use-before-definition error */
1874 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1875 assert (mode->code >= irm_F && mode->code <= irm_P);
1876 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1877 get_mode_null(mode));
1880 /* The local valid value is available now. */
1881 block->attr.block.graph_arr[pos] = res;
1886 #endif /* USE_FAST_PHI_CONSTRUCTION */
1888 /* ************************************************************************** */
1890 /** Finalize a Block node, when all control flows are known. */
1891 /** Acceptable parameters are only Block nodes. */
/* mature_immBlock: once every predecessor edge of "block" is known, fix its
   in-array, upgrade each queued Phi0 (chained via block->link) to a real Phi
   through phi_merge/exchange, and mark the block matured.
   NOTE(review): lines elided; code kept byte-identical. */
1893 mature_immBlock (ir_node *block)
1900 assert (get_irn_opcode(block) == iro_Block);
1901 /* @@@ should be commented in
1902 assert (!get_Block_matured(block) && "Block already matured"); */
1904 if (!get_Block_matured(block)) {
1905 ins = ARR_LEN (block->in)-1;
1906 /* Fix block parameters */
1907 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1909 /* An array for building the Phi nodes. */
1910 NEW_ARR_A (ir_node *, nin, ins);
1912 /* Traverse a chain of Phi nodes attached to this block and mature
1914 for (n = block->link; n; n=next) {
1915 inc_irg_visited(current_ir_graph);
1917 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1920 block->attr.block.matured = 1;
1922 /* Now, as the block is a finished firm node, we can optimize it.
1923 Since other nodes have been allocated since the block was created
1924 we can not free the node on the obstack. Therefore we have to call
1926 Unfortunately the optimization does not change a lot, as all allocated
1927 nodes refer to the unoptimized node.
1928 We can call _2, as global cse has no effect on blocks. */
1929 block = optimize_in_place_2(block);
1930 IRN_VRFY_IRG(block, current_ir_graph);
1935 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1937 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1942 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1944 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1949 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1951 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
1957 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1959 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
1964 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1966 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
1971 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1974 assert(arg->op == op_Cond);
1975 arg->attr.c.kind = fragmentary;
1976 arg->attr.c.default_proj = max_proj;
1977 res = new_Proj (arg, mode_X, max_proj);
1982 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1984 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
1989 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1991 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1995 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1997 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2002 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2004 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2009 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2011 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2019 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2024 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2026 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2031  * allocate the frag array
/* allocate_frag_arr: attach a frag_arr snapshot to "res" if precise
   exception context is enabled, we are still building the graph, "res"
   was not optimized into a different op, and no array was set yet (cse).
   NOTE(review): closing braces elided in this listing; code byte-identical. */
2033 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2034 if (get_opt_precise_exc_context()) {
2035 if ((current_ir_graph->phase_state == phase_building) &&
2036 (get_irn_op(res) == op) && /* Could be optimized away. */
2037 !*frag_store) /* Could be a cse where the arr is already set. */ {
2038 *frag_store = new_frag_arr(res);
2045 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2048 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2050 res->attr.except.pin_state = op_pin_state_pinned;
2051 #if PRECISE_EXC_CONTEXT
2052 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2059 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2062 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2064 res->attr.except.pin_state = op_pin_state_pinned;
2065 #if PRECISE_EXC_CONTEXT
2066 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2073 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2076 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2078 res->attr.except.pin_state = op_pin_state_pinned;
2079 #if PRECISE_EXC_CONTEXT
2080 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2087 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2090 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2092 res->attr.except.pin_state = op_pin_state_pinned;
2093 #if PRECISE_EXC_CONTEXT
2094 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2101 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2103 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2108 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2110 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2115 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2117 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2122 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2124 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2129 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2131 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2136 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2138 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2143 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2145 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2150 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2152 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2157 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2159 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2164 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2166 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2171 new_d_Jmp (dbg_info* db)
2173 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2177 new_d_Cond (dbg_info* db, ir_node *c)
2179 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2183 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2187 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2188 store, callee, arity, in, tp);
2189 #if PRECISE_EXC_CONTEXT
2190 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2197 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2199 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2204 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2206 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2211 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2214 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2216 #if PRECISE_EXC_CONTEXT
2217 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2224 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2227 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2229 #if PRECISE_EXC_CONTEXT
2230 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2237 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2241 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2242 store, size, alloc_type, where);
2243 #if PRECISE_EXC_CONTEXT
2244 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2251 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2253 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2254 store, ptr, size, free_type);
2258 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2259 /* GL: objptr was called frame before. Frame was a bad choice for the name
2260 as the operand could as well be a pointer to a dynamic object. */
2262 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2263 store, objptr, 0, NULL, ent);
2267 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2269 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2270 store, objptr, n_index, index, sel);
2274 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2276 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2277 store, objptr, ent));
2281 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2283 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2288 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2290 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2295 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2297 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2305 return __new_d_Bad();
2309 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2311 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2316 new_d_Unknown (ir_mode *m)
2318 return new_rd_Unknown(current_ir_graph, m);
2322 new_d_CallBegin (dbg_info *db, ir_node *call)
2325 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2330 new_d_EndReg (dbg_info *db)
2333 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2338 new_d_EndExcept (dbg_info *db)
2341 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2346 new_d_Break (dbg_info *db)
2348 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2352 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2354 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2361 return __new_d_NoMem();
2364 /* ********************************************************************* */
2365 /* Comfortable interface with automatic Phi node construction. */
2366 /* (Also uses constructors of the ?? interface, except new_Block.) */
2367 /* ********************************************************************* */
2369 /* * Block construction **/
2370 /* immature Block without predecessors */
/* new_d_immBlock: create an immature block (arity -1 → dynamic in-array),
   make it the current block, and set up the per-block value array used by
   the automatic Phi construction.  Only legal during phase_building.
   NOTE(review): lines elided; code kept byte-identical. */
2371 ir_node *new_d_immBlock (dbg_info* db) {
2374 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2375 /* creates a new dynamic in-array as length of in is -1 */
2376 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2377 current_ir_graph->current_block = res;
2378 res->attr.block.matured = 0;
2379 res->attr.block.dead = 0;
2380 /* res->attr.block.exc = exc_normal; */
2381 /* res->attr.block.handler_entry = 0; */
2382 res->attr.block.irg = current_ir_graph;
2383 res->attr.block.backedge = NULL;
2384 res->attr.block.in_cg = NULL;
2385 res->attr.block.cg_backedge = NULL;
2386 set_Block_block_visited(res, 0);
2388 /* Create and initialize array for Phi-node construction. */
2389 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2390 current_ir_graph->n_loc);
2391 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2393 /* Immature block may not be optimized! */
2394 IRN_VRFY_IRG(res, current_ir_graph);
/* new_immBlock: convenience wrapper without debug info. */
2400 new_immBlock (void) {
2401 return new_d_immBlock(NULL);
2404 /* add an edge to a jmp/control flow node */
/* add_immBlock_pred: append control-flow predecessor "jmp" to an immature
   block; asserts (fatally) if the block was already matured. */
2406 add_immBlock_pred (ir_node *block, ir_node *jmp)
2408 if (block->attr.block.matured) {
2409 assert(0 && "Error: Block already matured!\n");
2412 assert(jmp != NULL);
2413 ARR_APP1(ir_node *, block->in, jmp);
2417 /* changing the current block */
/* set_cur_block: make "target" the block new nodes are placed into. */
2419 set_cur_block (ir_node *target)
2421 current_ir_graph->current_block = target;
2424 /* ************************ */
2425 /* parameter administration */
2427 /* get a value from the parameter array from the current block by its index */
2429 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2431 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2432 inc_irg_visited(current_ir_graph);
2434 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2436 /* get a value from the parameter array from the current block by its index */
2438 get_value (int pos, ir_mode *mode)
2440 return get_d_value(NULL, pos, mode);
2443 /* set a value at position pos in the parameter array from the current block */
2445 set_value (int pos, ir_node *value)
2447 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2448 assert(pos+1 < current_ir_graph->n_loc);
2449 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2452 /* get the current store */
2456 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2457 /* GL: one could call get_value instead */
2458 inc_irg_visited(current_ir_graph);
2459 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2462 /* set the current store */
2464 set_store (ir_node *store)
2466 /* GL: one could call set_value instead */
2467 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2468 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2472 keep_alive (ir_node *ka)
2474 add_End_keepalive(current_ir_graph->end, ka);
2477 /** Useful access routines **/
2478 /* Returns the current block of the current graph. To set the current
2479 block use set_cur_block. */
2480 ir_node *get_cur_block() {
2481 return get_irg_current_block(current_ir_graph);
2484 /* Returns the frame type of the current graph */
2485 type *get_cur_frame_type() {
2486 return get_irg_frame_type(current_ir_graph);
2490 /* ********************************************************************* */
2493 /* call once for each run of the library */
2495 init_cons (default_initialize_local_variable_func_t *func)
2497 default_initialize_local_variable = func;
2500 /* call for each graph */
2502 finalize_cons (ir_graph *irg) {
2503 irg->phase_state = phase_high;
2507 ir_node *new_Block(int arity, ir_node **in) {
2508 return new_d_Block(NULL, arity, in);
2510 ir_node *new_Start (void) {
2511 return new_d_Start(NULL);
2513 ir_node *new_End (void) {
2514 return new_d_End(NULL);
2516 ir_node *new_Jmp (void) {
2517 return new_d_Jmp(NULL);
2519 ir_node *new_Cond (ir_node *c) {
2520 return new_d_Cond(NULL, c);
2522 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2523 return new_d_Return(NULL, store, arity, in);
2525 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2526 return new_d_Raise(NULL, store, obj);
2528 ir_node *new_Const (ir_mode *mode, tarval *con) {
2529 return new_d_Const(NULL, mode, con);
2532 ir_node *new_Const_type(tarval *con, type *tp) {
2533 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2536 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2537 return new_d_SymConst(NULL, value, kind);
2539 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2540 return new_d_simpleSel(NULL, store, objptr, ent);
2542 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2544 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2546 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2547 return new_d_InstOf (NULL, store, objptr, ent);
2549 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2551 return new_d_Call(NULL, store, callee, arity, in, tp);
2553 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2554 return new_d_Add(NULL, op1, op2, mode);
2556 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2557 return new_d_Sub(NULL, op1, op2, mode);
2559 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2560 return new_d_Minus(NULL, op, mode);
2562 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2563 return new_d_Mul(NULL, op1, op2, mode);
2565 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2566 return new_d_Quot(NULL, memop, op1, op2);
2568 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2569 return new_d_DivMod(NULL, memop, op1, op2);
2571 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2572 return new_d_Div(NULL, memop, op1, op2);
2574 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2575 return new_d_Mod(NULL, memop, op1, op2);
2577 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2578 return new_d_Abs(NULL, op, mode);
2580 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2581 return new_d_And(NULL, op1, op2, mode);
2583 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2584 return new_d_Or(NULL, op1, op2, mode);
2586 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2587 return new_d_Eor(NULL, op1, op2, mode);
2589 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2590 return new_d_Not(NULL, op, mode);
2592 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2593 return new_d_Shl(NULL, op, k, mode);
2595 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2596 return new_d_Shr(NULL, op, k, mode);
2598 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2599 return new_d_Shrs(NULL, op, k, mode);
2601 #define new_Rotate new_Rot
2602 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2603 return new_d_Rot(NULL, op, k, mode);
2605 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2606 return new_d_Cmp(NULL, op1, op2);
2608 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2609 return new_d_Conv(NULL, op, mode);
2611 ir_node *new_Cast (ir_node *op, type *to_tp) {
2612 return new_d_Cast(NULL, op, to_tp);
2614 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2615 return new_d_Phi(NULL, arity, in, mode);
2617 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2618 return new_d_Load(NULL, store, addr, mode);
2620 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2621 return new_d_Store(NULL, store, addr, val);
2623 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2624 where_alloc where) {
2625 return new_d_Alloc(NULL, store, size, alloc_type, where);
2627 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2629 return new_d_Free(NULL, store, ptr, size, free_type);
2631 ir_node *new_Sync (int arity, ir_node **in) {
2632 return new_d_Sync(NULL, arity, in);
2634 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2635 return new_d_Proj(NULL, arg, mode, proj);
2637 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2638 return new_d_defaultProj(NULL, arg, max_proj);
2640 ir_node *new_Tuple (int arity, ir_node **in) {
2641 return new_d_Tuple(NULL, arity, in);
2643 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2644 return new_d_Id(NULL, val, mode);
2646 ir_node *new_Bad (void) {
2649 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2650 return new_d_Confirm (NULL, val, bound, cmp);
2652 ir_node *new_Unknown(ir_mode *m) {
2653 return new_d_Unknown(m);
2655 ir_node *new_CallBegin (ir_node *callee) {
2656 return new_d_CallBegin(NULL, callee);
2658 ir_node *new_EndReg (void) {
2659 return new_d_EndReg(NULL);
2661 ir_node *new_EndExcept (void) {
2662 return new_d_EndExcept(NULL);
2664 ir_node *new_Break (void) {
2665 return new_d_Break(NULL);
2667 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2668 return new_d_Filter(NULL, arg, mode, proj);
2670 ir_node *new_NoMem (void) {
2671 return new_d_NoMem();