3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * language dependent initialization variable
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* --------------------------------------------- */
66 /* private interfaces, for professional use only */
67 /* --------------------------------------------- */
69 /* Constructs a Block with a fixed number of predecessors.
70 Does not set current_block. Can not be used with automatic
71 Phi node construction. */
/* NOTE(review): fragmented excerpt — declarations/braces elided by extraction. */
73 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block node itself has no enclosing block, hence NULL is passed. */
77 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Raw ("r") interface: the predecessor list is final, so mark matured. */
78 set_Block_matured(res, 1);
79 set_Block_block_visited(res, 0);
81 /* res->attr.block.exc = exc_normal; */
82 /* res->attr.block.handler_entry = 0; */
83 res->attr.block.dead = 0;
84 res->attr.block.irg = irg;
/* Fresh backedge flag array, one entry per control-flow predecessor. */
85 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view fields start out unset. */
86 res->attr.block.in_cg = NULL;
87 res->attr.block.cg_backedge = NULL;
88 res->attr.block.extblk = NULL;
90 IRN_VRFY_IRG(res, irg);
/* Start node: a mode_T tuple with no predecessors. */
95 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
99 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
100 /* res->attr.start.irg = irg; */
102 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 requests a dynamic in-array (keep-alives added later). */
107 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
124 bool has_unknown = false;
126 /* Don't assert that block matured: the use of this constructor is strongly
128 if ( get_Block_matured(block) )
129 assert( get_irn_arity(block) == arity );
131 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
133 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown; such Phis are not handed to optimize_node. */
135 for (i = arity-1; i >= 0; i--)
136 if (get_irn_op(in[i]) == op_Unknown) {
141 if (!has_unknown) res = optimize_node (res);
142 IRN_VRFY_IRG(res, irg);
144 /* Memory Phis in endless loops must be kept alive.
145 As we can't distinguish these easily we keep all of them alive. */
/* res->op is re-checked: optimize_node may have replaced the Phi. */
146 if ((res->op == op_Phi) && (mode == mode_M))
147 add_End_keepalive(irg->end, res);
/* Const constructor with explicit type. NOTE(review): the node is placed in
   irg->start_block, not in the `block` argument — the parameter appears to be
   kept for interface symmetry only; confirm against the header docs. */
152 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
157 res->attr.con.tv = con;
158 set_Const_type(res, tp); /* Call method because of complex assertion. */
159 res = optimize_node (res);
160 assert(get_Const_type(res) == tp);
161 IRN_VRFY_IRG(res, irg);
/* Convenience: Const with the unknown (default) type. */
167 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
169 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience: Const from a host long, converted to a tarval of `mode`. */
173 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
175 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Id: forwards its single operand; typically folded away by optimize_node. */
179 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
183 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
184 res = optimize_node(res);
185 IRN_VRFY_IRG(res, irg);
/* Proj: selects component `proj` of the tuple node `arg`. */
190 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
195 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
196 res->attr.proj = proj;
197 assert(res);
199 assert(get_Proj_pred(res));
200 assert(get_nodes_block(get_Proj_pred(res)));
202 res = optimize_node(res);
204 IRN_VRFY_IRG(res, irg);
/* defaultProj: Proj for the default case of a Cond. Side effect: marks the
   Cond's kind as `fragmentary` and records its default projection number. */
210 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
214 assert(arg->op == op_Cond);
215 arg->attr.c.kind = fragmentary;
216 arg->attr.c.default_proj = max_proj;
217 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: value conversion of `op` to the target mode. */
222 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
226 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
227 res = optimize_node(res);
228 IRN_VRFY_IRG(res, irg);
/* Cast: type cast without mode change; the mode is taken from `op`. */
233 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
237 assert(is_atomic_type(to_tp));
239 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
240 res->attr.cast.totype = to_tp;
241 res = optimize_node(res);
242 IRN_VRFY_IRG(res, irg);
/* Tuple: groups `arity` values into one mode_T node. */
247 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
251 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
252 res = optimize_node (res);
253 IRN_VRFY_IRG(res, irg);
/* --- Arithmetic, logic, shift and compare constructors. ---
   All follow one pattern: fill a local `in[]` array (elided in this excerpt),
   create the node, run optimize_node, verify. Division-family nodes take a
   memory operand and produce a mode_T tuple (memory + result + exception). */
258 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op1, ir_node *op2, ir_mode *mode)
266 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
267 res = optimize_node(res);
268 IRN_VRFY_IRG(res, irg);
273 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
274 ir_node *op1, ir_node *op2, ir_mode *mode)
281 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
282 res = optimize_node (res);
283 IRN_VRFY_IRG(res, irg);
288 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *op, ir_mode *mode)
293 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
294 res = optimize_node(res);
295 IRN_VRFY_IRG(res, irg);
300 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *op1, ir_node *op2, ir_mode *mode)
308 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
309 res = optimize_node(res);
310 IRN_VRFY_IRG(res, irg);
/* Quot/DivMod/Div/Mod: 3 inputs (memop, op1, op2), result is mode_T. */
315 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
325 res = optimize_node(res);
326 IRN_VRFY_IRG(res, irg);
331 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
340 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
347 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *memop, ir_node *op1, ir_node *op2)
356 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);
363 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
364 ir_node *memop, ir_node *op1, ir_node *op2)
372 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
373 res = optimize_node(res);
374 IRN_VRFY_IRG(res, irg);
379 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
387 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
388 res = optimize_node(res);
389 IRN_VRFY_IRG(res, irg);
394 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
395 ir_node *op1, ir_node *op2, ir_mode *mode)
402 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
403 res = optimize_node(res);
404 IRN_VRFY_IRG(res, irg);
409 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
410 ir_node *op1, ir_node *op2, ir_mode *mode)
417 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
418 res = optimize_node (res);
419 IRN_VRFY_IRG(res, irg);
424 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
425 ir_node *op, ir_mode *mode)
429 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
/* Shifts/rotate: second operand `k` is the shift amount. */
436 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
437 ir_node *op, ir_node *k, ir_mode *mode)
444 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
445 res = optimize_node(res);
446 IRN_VRFY_IRG(res, irg);
451 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
452 ir_node *op, ir_node *k, ir_mode *mode)
459 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
460 res = optimize_node(res);
461 IRN_VRFY_IRG(res, irg);
466 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
467 ir_node *op, ir_node *k, ir_mode *mode)
474 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
475 res = optimize_node(res);
476 IRN_VRFY_IRG(res, irg);
481 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
482 ir_node *op, ir_node *k, ir_mode *mode)
489 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
490 res = optimize_node(res);
491 IRN_VRFY_IRG(res, irg);
496 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
497 ir_node *op, ir_mode *mode)
501 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
502 res = optimize_node (res);
503 IRN_VRFY_IRG(res, irg);
/* Cmp: mode_T result; the relation is selected by a later Proj. */
508 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
509 ir_node *op1, ir_node *op2)
516 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
517 res = optimize_node(res);
518 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional branch, no data inputs. */
523 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
527 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
528 res = optimize_node (res);
529 IRN_VRFY_IRG (res, irg);
/* IJmp: indirect jump to a computed target address. */
534 new_rd_IJmp (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *tgt)
538 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
539 res = optimize_node (res);
540 IRN_VRFY_IRG (res, irg);
/* optimize_node may have replaced the IJmp, so re-check the opcode. */
542 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* Cond: conditional branch / switch on `c`; starts out with `dense` kind. */
548 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
552 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
553 res->attr.c.kind = dense;
554 res->attr.c.default_proj = 0;
555 res = optimize_node (res);
556 IRN_VRFY_IRG(res, irg);
/* Call: in-array layout is [0]=memory, [1]=callee, [2..]=arguments. */
561 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
562 ir_node *callee, int arity, ir_node **in, type *tp)
/* r_in is stack-allocated (alloca) with room for mem + callee + args. */
569 NEW_ARR_A(ir_node *, r_in, r_arity);
572 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
574 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
576 assert((get_unknown_type() == tp) || is_Method_type(tp));
577 set_Call_type(res, tp);
578 res->attr.call.exc.pin_state = op_pin_state_pinned;
579 res->attr.call.callee_arr = NULL;
580 res = optimize_node(res);
581 IRN_VRFY_IRG(res, irg);
/* Return: in-array layout is [0]=memory, [1..]=result values. */
586 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
587 ir_node *store, int arity, ir_node **in)
594 NEW_ARR_A (ir_node *, r_in, r_arity);
596 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
597 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
598 res = optimize_node(res);
599 IRN_VRFY_IRG(res, irg);
/* Raise: throws exception object `obj`; inputs are memory and object. */
604 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
611 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
/* Load: inputs (memory, address); result is mode_T, the loaded value's mode
   is recorded in the node attributes. Defaults: pinned, non-volatile. */
618 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
619 ir_node *store, ir_node *adr, ir_mode *mode)
626 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
627 res->attr.load.exc.pin_state = op_pin_state_pinned;
628 res->attr.load.load_mode = mode;
629 res->attr.load.volatility = volatility_non_volatile;
630 res = optimize_node(res);
631 IRN_VRFY_IRG(res, irg);
/* Store: inputs (memory, address, value); defaults pinned, non-volatile. */
636 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
637 ir_node *store, ir_node *adr, ir_node *val)
645 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
646 res->attr.store.exc.pin_state = op_pin_state_pinned;
647 res->attr.store.volatility = volatility_non_volatile;
648 res = optimize_node(res);
649 IRN_VRFY_IRG(res, irg);
/* Alloc: allocates `size` bytes of `alloc_type` on heap or stack (`where`). */
654 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
655 ir_node *size, type *alloc_type, where_alloc where)
662 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
663 res->attr.a.exc.pin_state = op_pin_state_pinned;
664 res->attr.a.where = where;
665 res->attr.a.type = alloc_type;
666 res = optimize_node(res);
667 IRN_VRFY_IRG(res, irg);
/* Free: releases `size` bytes at `ptr`; result is plain memory (mode_M). */
672 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
673 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
681 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
682 res->attr.f.where = where;
683 res->attr.f.type = free_type;
684 res = optimize_node(res);
685 IRN_VRFY_IRG(res, irg);
/* Sel: address computation selecting entity `ent` from compound `objptr`,
   with `arity` index expressions. in-array: [0]=memory, [1]=objptr, [2..]=indices. */
690 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
691 int arity, ir_node **in, entity *ent)
697 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
700 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
703 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
704 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
705 res->attr.s.ent = ent;
706 res = optimize_node(res);
707 IRN_VRFY_IRG(res, irg);
/* InstOf: dynamic type test of `objptr` against class type `ent`. */
712 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
713 ir_node *objptr, type *ent)
720 NEW_ARR_A(ir_node *, r_in, r_arity);
/* NOTE(review): op_Sel used here although this is the InstOf constructor —
   looks suspicious; verify against the opcode table before changing. */
724 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
725 res->attr.io.ent = ent;
726 /* Optimization not yet implemented for this node kind (left disabled): */
727 /* res = optimize(res); */
728 IRN_VRFY_IRG(res, irg);
/* SymConst: symbolic constant (address, type tag or type size).
   Address kinds get the reference mode; the branch below selects it. */
733 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
734 symconst_kind symkind, type *tp) {
738 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
743 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
745 res->attr.i.num = symkind;
746 res->attr.i.sym = value;
749 res = optimize_node(res);
750 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper with the unknown (default) type. */
755 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
756 symconst_kind symkind)
758 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Kind-specific SymConst constructors; all are placed in the start block.
   NOTE(review): the casts to (type *) fill the symconst_symbol union's first
   member — relies on the union layout; confirm against the union definition. */
762 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
763 symconst_symbol sym = {(type *)symbol};
764 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
767 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
768 symconst_symbol sym = {(type *)symbol};
769 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
772 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
773 symconst_symbol sym = {symbol};
774 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
777 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
778 symconst_symbol sym = {symbol};
779 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Sync: joins several memory values into one (mode_M). */
783 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
787 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
788 res = optimize_node(res);
789 IRN_VRFY_IRG(res, irg);
/* Bad: body elided in this excerpt; presumably returns irg's singleton Bad
   node — TODO confirm against the full source. */
794 new_rd_Bad (ir_graph *irg)
/* Confirm: asserts `val` stands in relation `cmp` to `bound`; same mode as val. */
800 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
802 ir_node *in[2], *res;
806 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
807 res->attr.confirm_cmp = cmp;
808 res = optimize_node (res);
809 IRN_VRFY_IRG(res, irg);
/* Unknown: placed in the start block; note it is NOT passed to optimize_node. */
814 new_rd_Unknown (ir_graph *irg, ir_mode *m)
816 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* --- Interprocedural-view constructors. --- */
/* CallBegin: marks a call site in the interprocedural view; input is the
   callee pointer taken from the associated Call node. */
820 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
825 in[0] = get_Call_ptr(call);
826 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
827 /* res->attr.callbegin.irg = irg; */
828 res->attr.callbegin.call = call;
829 res = optimize_node(res);
830 IRN_VRFY_IRG(res, irg);
/* EndReg / EndExcept: interprocedural end nodes; arity -1 = dynamic in-array. */
835 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
839 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
841 IRN_VRFY_IRG(res, irg);
846 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
850 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
851 irg->end_except = res;
852 IRN_VRFY_IRG (res, irg);
/* Break: control flow edge in the interprocedural view (like Jmp). */
857 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
861 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
862 res = optimize_node(res);
863 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj; selects `proj` from `arg`. */
868 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
873 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
874 res->attr.filter.proj = proj;
875 res->attr.filter.in_cg = NULL;
876 res->attr.filter.backedge = NULL;
879 assert(get_Proj_pred(res));
880 assert(get_nodes_block(get_Proj_pred(res)));
882 res = optimize_node(res);
883 IRN_VRFY_IRG(res, irg);
/* NoMem: body elided; presumably returns the graph's singleton NoMem node —
   TODO confirm against the full source. */
888 new_rd_NoMem (ir_graph *irg) {
/* Mux: conditional select — inputs (sel, ir_false, ir_true), result `mode`. */
893 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
894 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
903 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
906 res = optimize_node(res);
907 IRN_VRFY_IRG(res, irg);
/* --- Convenience wrappers: each new_r_* forwards to the corresponding
   new_rd_* constructor with NULL debug info. No additional logic. --- */
912 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
913 return new_rd_Block(NULL, irg, arity, in);
915 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
916 return new_rd_Start(NULL, irg, block);
918 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
919 return new_rd_End(NULL, irg, block);
921 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
922 return new_rd_Jmp(NULL, irg, block);
924 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
925 return new_rd_IJmp(NULL, irg, block, tgt);
927 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
928 return new_rd_Cond(NULL, irg, block, c);
930 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
931 ir_node *store, int arity, ir_node **in) {
932 return new_rd_Return(NULL, irg, block, store, arity, in);
934 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
935 ir_node *store, ir_node *obj) {
936 return new_rd_Raise(NULL, irg, block, store, obj);
938 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
939 ir_mode *mode, tarval *con) {
940 return new_rd_Const(NULL, irg, block, mode, con);
943 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
944 ir_mode *mode, long value) {
945 return new_rd_Const_long(NULL, irg, block, mode, value);
948 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
949 ir_mode *mode, tarval *con, type *tp) {
950 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
953 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
954 symconst_symbol value, symconst_kind symkind) {
955 return new_rd_SymConst(NULL, irg, block, value, symkind);
957 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
958 ir_node *objptr, int n_index, ir_node **index,
960 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
962 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
964 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
966 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
967 ir_node *callee, int arity, ir_node **in,
969 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
971 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
972 ir_node *op1, ir_node *op2, ir_mode *mode) {
973 return new_rd_Add(NULL, irg, block, op1, op2, mode);
975 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
976 ir_node *op1, ir_node *op2, ir_mode *mode) {
977 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
979 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
980 ir_node *op, ir_mode *mode) {
981 return new_rd_Minus(NULL, irg, block, op, mode);
983 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
984 ir_node *op1, ir_node *op2, ir_mode *mode) {
985 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
987 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
988 ir_node *memop, ir_node *op1, ir_node *op2) {
989 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
991 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
992 ir_node *memop, ir_node *op1, ir_node *op2) {
993 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
995 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
996 ir_node *memop, ir_node *op1, ir_node *op2) {
997 return new_rd_Div(NULL, irg, block, memop, op1, op2);
999 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1000 ir_node *memop, ir_node *op1, ir_node *op2) {
1001 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1003 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1004 ir_node *op, ir_mode *mode) {
1005 return new_rd_Abs(NULL, irg, block, op, mode);
1007 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1008 ir_node *op1, ir_node *op2, ir_mode *mode) {
1009 return new_rd_And(NULL, irg, block, op1, op2, mode);
1011 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1012 ir_node *op1, ir_node *op2, ir_mode *mode) {
1013 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1015 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1016 ir_node *op1, ir_node *op2, ir_mode *mode) {
1017 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1019 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1020 ir_node *op, ir_mode *mode) {
1021 return new_rd_Not(NULL, irg, block, op, mode);
1023 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1024 ir_node *op1, ir_node *op2) {
1025 return new_rd_Cmp(NULL, irg, block, op1, op2);
1027 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1028 ir_node *op, ir_node *k, ir_mode *mode) {
1029 return new_rd_Shl(NULL, irg, block, op, k, mode);
1031 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1032 ir_node *op, ir_node *k, ir_mode *mode) {
1033 return new_rd_Shr(NULL, irg, block, op, k, mode);
1035 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1036 ir_node *op, ir_node *k, ir_mode *mode) {
1037 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1039 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1040 ir_node *op, ir_node *k, ir_mode *mode) {
1041 return new_rd_Rot(NULL, irg, block, op, k, mode);
1043 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1044 ir_node *op, ir_mode *mode) {
1045 return new_rd_Conv(NULL, irg, block, op, mode);
1047 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1048 return new_rd_Cast(NULL, irg, block, op, to_tp);
1050 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1051 ir_node **in, ir_mode *mode) {
1052 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1054 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1055 ir_node *store, ir_node *adr, ir_mode *mode) {
1056 return new_rd_Load(NULL, irg, block, store, adr, mode);
1058 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1059 ir_node *store, ir_node *adr, ir_node *val) {
1060 return new_rd_Store(NULL, irg, block, store, adr, val);
1062 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1063 ir_node *size, type *alloc_type, where_alloc where) {
1064 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1066 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1067 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1068 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1070 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1071 return new_rd_Sync(NULL, irg, block, arity, in);
1073 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1074 ir_mode *mode, long proj) {
1075 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1077 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1079 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1081 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1082 int arity, ir_node **in) {
1083 return new_rd_Tuple(NULL, irg, block, arity, in );
1085 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1086 ir_node *val, ir_mode *mode) {
1087 return new_rd_Id(NULL, irg, block, val, mode);
1089 ir_node *new_r_Bad (ir_graph *irg) {
1090 return new_rd_Bad(irg);
1092 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1093 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1095 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1096 return new_rd_Unknown(irg, m);
1098 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1099 return new_rd_CallBegin(NULL, irg, block, callee);
1101 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1102 return new_rd_EndReg(NULL, irg, block);
1104 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1105 return new_rd_EndExcept(NULL, irg, block);
1107 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1108 return new_rd_Break(NULL, irg, block);
1110 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1111 ir_mode *mode, long proj) {
1112 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1114 ir_node *new_r_NoMem (ir_graph *irg) {
1115 return new_rd_NoMem(irg);
1117 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1118 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1119 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1123 /** ********************/
1124 /** public interfaces */
1125 /** construction tools */
1129 * - create a new Start node in the current block
1131 * @return s - pointer to the created Start node
/* new_d_* constructors implicitly use current_ir_graph and its current block. */
1136 new_d_Start (dbg_info* db)
1140 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1141 op_Start, mode_T, 0, NULL);
1142 /* res->attr.start.irg = current_ir_graph; */
1144 res = optimize_node(res);
1145 IRN_VRFY_IRG(res, current_ir_graph);
/* End node in the current block; arity -1 = dynamic in-array. */
1150 new_d_End (dbg_info* db)
1153 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1154 op_End, mode_X, -1, NULL);
1155 res = optimize_node(res);
1156 IRN_VRFY_IRG(res, current_ir_graph);
1161 /* Constructs a Block with a fixed number of predecessors.
1162 Does set current_block. Can be used with automatic Phi
1163 node construction. */
1165 new_d_Block (dbg_info* db, int arity, ir_node **in)
1169 bool has_unknown = false;
1171 res = new_rd_Block(db, current_ir_graph, arity, in);
1173 /* Create and initialize array for Phi-node construction. */
1174 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr holds one slot per local variable (n_loc), zeroed initially. */
1175 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1176 current_ir_graph->n_loc);
1177 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Unknown predecessors block early optimization (same pattern as new_rd_Phi). */
1180 for (i = arity-1; i >= 0; i--)
1181 if (get_irn_op(in[i]) == op_Unknown) {
1186 if (!has_unknown) res = optimize_node(res);
/* Side effect: the new block becomes the current block for construction. */
1187 current_ir_graph->current_block = res;
1189 IRN_VRFY_IRG(res, current_ir_graph);
1194 /* ***********************************************************************/
1195 /* Methods necessary for automatic Phi node creation */
1197 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1198 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1199 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1200 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1202 Call Graph: ( A ---> B == A "calls" B)
1204 get_value mature_immBlock
1212 get_r_value_internal |
1216 new_rd_Phi0 new_rd_Phi_in
1218 * *************************************************************************** */
1220 /** Creates a Phi node with 0 predecessors */
/* Placeholder used during on-the-fly SSA construction; note: not optimized. */
1221 static INLINE ir_node *
1222 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1226 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1227 IRN_VRFY_IRG(res, irg);
1231 /* There are two implementations of the Phi node construction. The first
1232 is faster, but does not work for blocks with more than 2 predecessors.
1233 The second works always but is slower and causes more unnecessary Phi
1235 Select the implementations by the following preprocessor flag set in
1237 #if USE_FAST_PHI_CONSTRUCTION
1239 /* This is a stack used for allocating and deallocating nodes in
1240 new_rd_Phi_in. The original implementation used the obstack
1241 to model this stack, now it is explicit. This reduces side effects.
1243 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate the per-graph stack of recyclable Phi nodes. */
1245 new_Phi_in_stack(void) {
1248 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1250 res->stack = NEW_ARR_F (ir_node *, 0);
/* Release the stack and its flexible array. */
1257 free_Phi_in_stack(Phi_in_stack *s) {
1258 DEL_ARR_F(s->stack);
/* Push a discarded Phi onto the stack for later reuse; grows the array
   only when pos has reached its current length. */
1262 free_to_Phi_in_stack(ir_node *phi) {
1263 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1264 current_ir_graph->Phi_in_stack->pos)
1265 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1267 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1269 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a recycled Phi if available, else allocate a fresh one. */
1272 static INLINE ir_node *
1273 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1274 int arity, ir_node **in) {
1276 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1277 int pos = current_ir_graph->Phi_in_stack->pos;
1281 /* We need to allocate a new node */
/* NOTE(review): `db` is not declared in this visible fragment — verify its
   origin (parameter or local) in the full source. */
1282 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1283 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1285 /* reuse the old node and initialize it again. */
1288 assert (res->kind == k_ir_node);
1289 assert (res->op == op_Phi);
1293 assert (arity >= 0);
1294 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer, hence arity+1 slots. */
1295 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1297 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1299 (current_ir_graph->Phi_in_stack->pos)--;
1305 /* Creates a Phi node with a given, fixed array **in of predecessors.
1306 If the Phi node is unnecessary, as the same value reaches the block
1307 through all control flow paths, it is eliminated and the value
1308 returned directly. This constructor is only intended for use in
1309 the automatic Phi node generation triggered by get_value or mature.
1310 The implementation is quite tricky and depends on the fact, that
1311 the nodes are allocated on a stack:
1312 The in array contains predecessors and NULLs. The NULLs appear,
1313 if get_r_value_internal, that computed the predecessors, reached
1314 the same block on two paths. In this case the same value reaches
1315 this block on both paths, there is no definition in between. We need
1316 not allocate a Phi where these path's merge, but we have to communicate
1317 this fact to the caller. This happens by returning a pointer to the
1318 node the caller _will_ allocate. (Yes, we predict the address. We can
1319 do so because the nodes are allocated on the obstack.) The caller then
1320 finds a pointer to itself and, when this routine is called again,
1323 static INLINE ir_node *
1324 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1327 ir_node *res, *known;
1329 /* Allocate a new node on the obstack. This can return a node to
1330 which some of the pointers in the in-array already point.
1331 Attention: the constructor copies the in array, i.e., the later
1332 changes to the array in this routine do not affect the
1333 constructed node! If the in array contains NULLs, there will be
1334 missing predecessors in the returned node. Is this a possible
1335 internal state of the Phi node generation? */
1336 #if USE_EXPLICIT_PHI_IN_STACK
1337 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1339 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1340 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1343 /* The in-array can contain NULLs. These were returned by
1344 get_r_value_internal if it reached the same block/definition on a
1345 second path. The NULLs are replaced by the node itself to
1346 simplify the test in the next loop. */
1347 for (i = 0; i < ins; ++i) {
1352 /* This loop checks whether the Phi has more than one predecessor.
1353 If so, it is a real Phi node and we break the loop. Else the Phi
1354 node merges the same definition on several paths and therefore is
1356 for (i = 0; i < ins; ++i) {
1357 if (in[i] == res || in[i] == known)
1366 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Superfluous Phi: recycle it (explicit stack) or free it off the obstack. */
1368 #if USE_EXPLICIT_PHI_IN_STACK
1369 free_to_Phi_in_stack(res);
1371 edges_node_deleted(res, current_ir_graph);
1372 obstack_free(current_ir_graph->obst, res);
1376 res = optimize_node (res);
1377 IRN_VRFY_IRG(res, irg);
1380 /* return the pointer to the Phi node. This node might be deallocated! */
1385 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1388 allocates and returns this node. The routine called to allocate the
1389 node might optimize it away and return a real value, or even a pointer
1390 to a deallocated Phi node on top of the obstack!
1391 This function is called with an in-array of proper size. **/
/* phi_merge (slow variant): for local variable `pos`, collect the
   reaching definition from every predecessor block into `nin` and
   build the Phi via new_rd_Phi_in.  May return a pointer to a node
   that is only *predicted*, not yet allocated -- see new_rd_Phi_in.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1393 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1395 ir_node *prevBlock, *res;
1398 /* This loop goes to all predecessor blocks of the block the Phi node is in
1399 and there finds the operands of the Phi node by calling
1400 get_r_value_internal. */
1401 for (i = 1; i <= ins; ++i) {
1402 assert (block->in[i]);
1403 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1405 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1408 /* After collecting all predecessors into the array nin a new Phi node
1409 with these predecessors is created. This constructor contains an
1410 optimization: If all predecessors of the Phi node are identical it
1411 returns the only operand instead of a new Phi node. If the value
1412 passes two different control flow edges without being defined, and
1413 this is the second path treated, a pointer to the node that will be
1414 allocated for the first path (recursion) is returned. We already
1415 know the address of this node, as it is the next node to be allocated
1416 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1417 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1419 /* Now we know the value for "pos" and can enter it in the array with
1420 all known local variables. Attention: this might be a pointer to
1421 a node, that later will be allocated!!! See new_rd_Phi_in.
1422 If this is called in mature, after some set_value in the same block,
1423 the proper value must not be overwritten:
1425 get_value (makes Phi0, put's it into graph_arr)
1426 set_value (overwrites Phi0 in graph_arr)
1427 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1430 if (!block->attr.block.graph_arr[pos]) {
1431 block->attr.block.graph_arr[pos] = res;
1433 /* printf(" value already computed by %s\n",
1434 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1440 /* This function returns the last definition of a variable. In case
1441 this variable was last defined in a previous block, Phi nodes are
1442 inserted. If the part of the firm graph containing the definition
1443 is not yet constructed, a dummy Phi node is returned. */
/* get_r_value_internal (slow variant): return the last definition of
   local variable `pos` in `block`, creating Phi/Phi0 nodes as needed.
   The four cases are documented in the long comment below.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1445 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1448 /* There are 4 cases to treat.
1450 1. The block is not mature and we visit it the first time. We can not
1451 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1452 predecessors is returned. This node is added to the linked list (field
1453 "link") of the containing block to be completed when this block is
1454 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1457 2. The value is already known in this block, graph_arr[pos] is set and we
1458 visit the block the first time. We can return the value without
1459 creating any new nodes.
1461 3. The block is mature and we visit it the first time. A Phi node needs
1462 to be created (phi_merge). If the Phi is not needed, as all it's
1463 operands are the same value reaching the block through different
1464 paths, it's optimized away and the value itself is returned.
1466 4. The block is mature, and we visit it the second time. Now two
1467 subcases are possible:
1468 * The value was computed completely the last time we were here. This
1469 is the case if there is no loop. We can return the proper value.
1470 * The recursion that visited this node and set the flag did not
1471 return yet. We are computing a value in a loop and need to
1472 break the recursion without knowing the result yet.
1473 @@@ strange case. Straight forward we would create a Phi before
1474 starting the computation of it's predecessors. In this case we will
1475 find a Phi here in any case. The problem is that this implementation
1476 only creates a Phi after computing the predecessors, so that it is
1477 hard to compute self references of this Phi. @@@
1478 There is no simple check for the second subcase. Therefore we check
1479 for a second visit and treat all such cases as the second subcase.
1480 Anyways, the basic situation is the same: we reached a block
1481 on two paths without finding a definition of the value: No Phi
1482 nodes are needed on both paths.
1483 We return this information "Two paths, no Phi needed" by a very tricky
1484 implementation that relies on the fact that an obstack is a stack and
1485 will return a node with the same address on different allocations.
1486 Look also at phi_merge and new_rd_phi_in to understand this.
1487 @@@ Unfortunately this does not work, see testprogram
1488 three_cfpred_example.
1492 /* case 4 -- already visited. */
1493 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1495 /* visited the first time */
1496 set_irn_visited(block, get_irg_visited(current_ir_graph));
1498 /* Get the local valid value */
1499 res = block->attr.block.graph_arr[pos];
1501 /* case 2 -- If the value is actually computed, return it. */
1502 if (res) return res;
1504 if (block->attr.block.matured) { /* case 3 */
1506 /* The Phi has the same amount of ins as the corresponding block. */
1507 int ins = get_irn_arity(block);
1509 NEW_ARR_A (ir_node *, nin, ins);
1511 /* Phi merge collects the predecessors and then creates a node. */
1512 res = phi_merge (block, pos, mode, nin, ins);
1514 } else { /* case 1 */
1515 /* The block is not mature, we don't know how many in's are needed. A Phi
1516 with zero predecessors is created. Such a Phi node is called Phi0
1517 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1518 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1520 The Phi0 has to remember the pos of it's internal value. If the real
1521 Phi is computed, pos is used to update the array with the local
1524 res = new_rd_Phi0 (current_ir_graph, block, mode);
1525 res->attr.phi0_pos = pos;
1526 res->link = block->link;
1530 /* If we get here, the frontend missed a use-before-definition error */
1533 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1534 assert (mode->code >= irm_F && mode->code <= irm_P);
1535 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1536 tarval_mode_null[mode->code]);
1539 /* The local valid value is available now. */
1540 block->attr.block.graph_arr[pos] = res;
1548 it starts the recursion. This causes an Id at the entry of
1549 every block that has no definition of the value! **/
1551 #if USE_EXPLICIT_PHI_IN_STACK
/* NOTE(review): dummy implementations of the Phi_in stack API for this
   build configuration -- presumably so references to the stack hooks
   still compile/link when no real stack is kept; confirm against the
   full file. */
1553 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1554 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Build a Phi node in `block` with `ins` predecessors from `in`
   (fast-construction variant).  `phi0` is the placeholder Phi0 that
   this Phi will replace, or NULL; self-references through phi0 are
   redirected to the new node.  If only one distinct (non-Bad) value is
   merged, the node is freed again and that value is returned.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1557 static INLINE ir_node *
1558 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1559 ir_node **in, int ins, ir_node *phi0)
1562 ir_node *res, *known;
1564 /* Allocate a new node on the obstack. The allocation copies the in
1566 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1567 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1569 /* This loop checks whether the Phi has more than one predecessor.
1570 If so, it is a real Phi node and we break the loop. Else the
1571 Phi node merges the same definition on several paths and therefore
1572 is not needed. Don't consider Bad nodes! */
1574 for (i=0; i < ins; ++i)
1578 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1580 /* Optimize self referencing Phis: We can't detect them yet properly, as
1581 they still refer to the Phi0 they will replace. So replace right now. */
1582 if (phi0 && in[i] == phi0) in[i] = res;
1584 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1592 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1595 edges_node_deleted(res, current_ir_graph);
1596 obstack_free (current_ir_graph->obst, res);
1597 if (is_Phi(known)) {
1598 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1599 order, an enclosing Phi node may get superfluous. */
1600 res = optimize_in_place_2(known);
1602 exchange(known, res);
1608 /* An undefined value, e.g., in unreachable code. */
1612 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1613 IRN_VRFY_IRG(res, irg);
1614 /* Memory Phis in endless loops must be kept alive.
1615 As we can't distinguish these easily we keep all of them alive. */
1616 if ((res->op == op_Phi) && (mode == mode_M))
1617 add_End_keepalive(irg->end, res);
1624 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1626 #if PRECISE_EXC_CONTEXT
1628 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1630 /* Construct a new frag_array for node n.
1631 Copy the content from the current graph_arr of the corresponding block:
1632 this is the current state.
1633 Set ProjM(n) as current memory state.
1634 Further the last entry in frag_arr of current block points to n. This
1635 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Construct a fresh frag_arr for fragile op `n`: a copy of the current
   block's graph_arr, with entry 0 set to the memory Proj of `n` and the
   last entry of the current block's graph_arr chained to `n` (see the
   comment above the function in the full file).
   NOTE(review): sampled excerpt; intermediate lines (if/else and
   closing braces) are missing. */
1637 static INLINE ir_node ** new_frag_arr (ir_node *n)
1642 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1643 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1644 sizeof(ir_node *)*current_ir_graph->n_loc);
1646 /* turn off optimization before allocating Proj nodes, as res isn't
1648 opt = get_opt_optimize(); set_optimize(0);
1649 /* Here we rely on the fact that all frag ops have Memory as first result! */
1650 if (get_irn_op(n) == op_Call)
1651 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1653 assert((pn_Quot_M == pn_DivMod_M) &&
1654 (pn_Quot_M == pn_Div_M) &&
1655 (pn_Quot_M == pn_Mod_M) &&
1656 (pn_Quot_M == pn_Load_M) &&
1657 (pn_Quot_M == pn_Store_M) &&
1658 (pn_Quot_M == pn_Alloc_M) );
1659 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1663 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1668 * returns the frag_arr from a node
/* Dispatch on the node's opcode to the per-op exception attribute that
   stores the frag_arr.
   NOTE(review): the `case` labels themselves fall on lines not shown
   in this excerpt -- presumably iro_Call / iro_Alloc / iro_Load /
   iro_Store / default; confirm against the full file. */
1670 static INLINE ir_node **
1671 get_frag_arr (ir_node *n) {
1672 switch (get_irn_opcode(n)) {
1674 return n->attr.call.exc.frag_arr;
1676 return n->attr.a.exc.frag_arr;
1678 return n->attr.load.exc.frag_arr;
1680 return n->attr.store.exc.frag_arr;
1682 return n->attr.except.frag_arr;
/* Write `val` at `pos` into `frag_arr` (only if not yet set) and
   propagate along the chain of frag_arrs linked through the last slot.
   NOTE(review): this excerpt shows two variants interleaved -- a
   recursive one (lines 1689-1693) and a bounded-loop one with an
   explicit recursion limit of 1000 (lines 1698-1709).  They are
   presumably separated by preprocessor conditionals on the missing
   lines; confirm against the full file. */
1687 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1689 if (!frag_arr[pos]) frag_arr[pos] = val;
1690 if (frag_arr[current_ir_graph->n_loc - 1]) {
1691 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1692 assert(arr != frag_arr && "Endless recursion detected");
1693 set_frag_value(arr, pos, val);
1698 for (i = 0; i < 1000; ++i) {
1699 if (!frag_arr[pos]) {
1700 frag_arr[pos] = val;
1702 if (frag_arr[current_ir_graph->n_loc - 1]) {
1703 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1709 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but for a value reached through the
   fragile op `cfOp`: consult cfOp's frag_arr first, and build a
   Phi/Phi0 if a set_value after the cfOp shadowed the fragment value.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1714 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1718 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1720 frag_arr = get_frag_arr(cfOp);
1721 res = frag_arr[pos];
1723 if (block->attr.block.graph_arr[pos]) {
1724 /* There was a set_value after the cfOp and no get_value before that
1725 set_value. We must build a Phi node now. */
1726 if (block->attr.block.matured) {
1727 int ins = get_irn_arity(block);
1729 NEW_ARR_A (ir_node *, nin, ins);
1730 res = phi_merge(block, pos, mode, nin, ins);
1732 res = new_rd_Phi0 (current_ir_graph, block, mode);
1733 res->attr.phi0_pos = pos;
1734 res->link = block->link;
1738 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1739 but this should be better: (remove comment if this works) */
1740 /* It's a Phi, we can write this into all graph_arrs with NULL */
1741 set_frag_value(block->attr.block.graph_arr, pos, res);
1743 res = get_r_value_internal(block, pos, mode);
1744 set_frag_value(block->attr.block.graph_arr, pos, res);
1752 computes the predecessors for the real phi node, and then
1753 allocates and returns this node. The routine called to allocate the
1754 node might optimize it away and return a real value.
1755 This function must be called with an in-array of proper size. **/
/* phi_merge (fast variant): collect the reaching definition of local
   variable `pos` from every predecessor block into `nin`, allocating a
   Phi0 placeholder first to break recursion, then build the real Phi
   with new_rd_Phi_in and exchange the Phi0 for it.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1757 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1759 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1762 /* If this block has no value at pos create a Phi0 and remember it
1763 in graph_arr to break recursions.
1764 Else we may not set graph_arr as there a later value is remembered. */
1766 if (!block->attr.block.graph_arr[pos]) {
1767 if (block == get_irg_start_block(current_ir_graph)) {
1768 /* Collapsing to Bad tarvals is no good idea.
1769 So we call a user-supplied routine here that deals with this case as
1770 appropriate for the given language. Sadly the only help we can give
1771 here is the position.
1773 Even if all variables are defined before use, it can happen that
1774 we get to the start block, if a cond has been replaced by a tuple
1775 (bad, jmp). In this case we call the function needlessly, eventually
1776 generating a non-existent error.
1777 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1780 if (default_initialize_local_variable)
1781 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1783 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1784 /* We don't need to care about exception ops in the start block.
1785 There are none by definition. */
1786 return block->attr.block.graph_arr[pos];
1788 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1789 block->attr.block.graph_arr[pos] = phi0;
1790 #if PRECISE_EXC_CONTEXT
1791 if (get_opt_precise_exc_context()) {
1792 /* Set graph_arr for fragile ops. Also here we should break recursion.
1793 We could choose a cyclic path through a cfop. But the recursion would
1794 break at some point. */
1795 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1801 /* This loop goes to all predecessor blocks of the block the Phi node
1802 is in and there finds the operands of the Phi node by calling
1803 get_r_value_internal. */
1804 for (i = 1; i <= ins; ++i) {
1805 prevCfOp = skip_Proj(block->in[i]);
1807 if (is_Bad(prevCfOp)) {
1808 /* In case a Cond has been optimized we would get right to the start block
1809 with an invalid definition. */
1810 nin[i-1] = new_Bad();
1813 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1815 if (!is_Bad(prevBlock)) {
1816 #if PRECISE_EXC_CONTEXT
1817 if (get_opt_precise_exc_context() &&
1818 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1819 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1820 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1823 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1825 nin[i-1] = new_Bad();
1829 /* We want to pass the Phi0 node to the constructor: this finds additional
1830 optimization possibilities.
1831 The Phi0 node either is allocated in this function, or it comes from
1832 a former call to get_r_value_internal. In this case we may not yet
1833 exchange phi0, as this is done in mature_immBlock. */
1835 phi0_all = block->attr.block.graph_arr[pos];
1836 if (!((get_irn_op(phi0_all) == op_Phi) &&
1837 (get_irn_arity(phi0_all) == 0) &&
1838 (get_nodes_block(phi0_all) == block)))
1844 /* After collecting all predecessors into the array nin a new Phi node
1845 with these predecessors is created. This constructor contains an
1846 optimization: If all predecessors of the Phi node are identical it
1847 returns the only operand instead of a new Phi node. */
1848 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1850 /* In case we allocated a Phi0 node at the beginning of this procedure,
1851 we need to exchange this Phi0 with the real Phi. */
1853 exchange(phi0, res);
1854 block->attr.block.graph_arr[pos] = res;
1855 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1856 only an optimization. */
1862 /* This function returns the last definition of a variable. In case
1863 this variable was last defined in a previous block, Phi nodes are
1864 inserted. If the part of the firm graph containing the definition
1865 is not yet constructed, a dummy Phi node is returned. */
/* get_r_value_internal (fast variant): return the last definition of
   local variable `pos` in `block`.  Differs from the slow variant in
   case 4: phi_merge always left a Phi0 in graph_arr, so a revisited
   block can return that instead of NULL.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1867 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1870 /* There are 4 cases to treat.
1872 1. The block is not mature and we visit it the first time. We can not
1873 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1874 predecessors is returned. This node is added to the linked list (field
1875 "link") of the containing block to be completed when this block is
1876 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1879 2. The value is already known in this block, graph_arr[pos] is set and we
1880 visit the block the first time. We can return the value without
1881 creating any new nodes.
1883 3. The block is mature and we visit it the first time. A Phi node needs
1884 to be created (phi_merge). If the Phi is not needed, as all it's
1885 operands are the same value reaching the block through different
1886 paths, it's optimized away and the value itself is returned.
1888 4. The block is mature, and we visit it the second time. Now two
1889 subcases are possible:
1890 * The value was computed completely the last time we were here. This
1891 is the case if there is no loop. We can return the proper value.
1892 * The recursion that visited this node and set the flag did not
1893 return yet. We are computing a value in a loop and need to
1894 break the recursion. This case only happens if we visited
1895 the same block with phi_merge before, which inserted a Phi0.
1896 So we return the Phi0.
1899 /* case 4 -- already visited. */
1900 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1901 /* As phi_merge allocates a Phi0 this value is always defined. Here
1902 is the critical difference of the two algorithms. */
1903 assert(block->attr.block.graph_arr[pos]);
1904 return block->attr.block.graph_arr[pos];
1907 /* visited the first time */
1908 set_irn_visited(block, get_irg_visited(current_ir_graph));
1910 /* Get the local valid value */
1911 res = block->attr.block.graph_arr[pos];
1913 /* case 2 -- If the value is actually computed, return it. */
1914 if (res) { return res; };
1916 if (block->attr.block.matured) { /* case 3 */
1918 /* The Phi has the same amount of ins as the corresponding block. */
1919 int ins = get_irn_arity(block);
1921 NEW_ARR_A (ir_node *, nin, ins);
1923 /* Phi merge collects the predecessors and then creates a node. */
1924 res = phi_merge (block, pos, mode, nin, ins);
1926 } else { /* case 1 */
1927 /* The block is not mature, we don't know how many in's are needed. A Phi
1928 with zero predecessors is created. Such a Phi node is called Phi0
1929 node. The Phi0 is then added to the list of Phi0 nodes in this block
1930 to be matured by mature_immBlock later.
1931 The Phi0 has to remember the pos of it's internal value. If the real
1932 Phi is computed, pos is used to update the array with the local
1934 res = new_rd_Phi0 (current_ir_graph, block, mode);
1935 res->attr.phi0_pos = pos;
1936 res->link = block->link;
1940 /* If we get here, the frontend missed a use-before-definition error */
1943 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1944 assert (mode->code >= irm_F && mode->code <= irm_P);
1945 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1946 get_mode_null(mode));
1949 /* The local valid value is available now. */
1950 block->attr.block.graph_arr[pos] = res;
1955 #endif /* USE_FAST_PHI_CONSTRUCTION */
1957 /* ************************************************************************** */
1959 /** Finalize a Block node, when all control flows are known. */
1960 /** Acceptable parameters are only Block nodes. */
/* Finalize an immature Block once all its control-flow predecessors
   are known: fix the backedge array, upgrade every queued Phi0 on the
   block's link list via phi_merge, mark the block matured and optimize
   it in place.
   NOTE(review): sampled excerpt; intermediate lines are missing. */
1962 mature_immBlock (ir_node *block)
1969 assert (get_irn_opcode(block) == iro_Block);
1970 /* @@@ should be commented in
1971 assert (!get_Block_matured(block) && "Block already matured"); */
1973 if (!get_Block_matured(block)) {
1974 ins = ARR_LEN (block->in)-1;
1975 /* Fix block parameters */
1976 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1978 /* An array for building the Phi nodes. */
1979 NEW_ARR_A (ir_node *, nin, ins);
1981 /* Traverse a chain of Phi nodes attached to this block and mature
1983 for (n = block->link; n; n=next) {
1984 inc_irg_visited(current_ir_graph);
1986 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1989 block->attr.block.matured = 1;
1991 /* Now, as the block is a finished firm node, we can optimize it.
1992 Since other nodes have been allocated since the block was created
1993 we can not free the node on the obstack. Therefore we have to call
1995 Unfortunately the optimization does not change a lot, as all allocated
1996 nodes refer to the unoptimized node.
1997 We can call _2, as global cse has no effect on blocks. */
1998 block = optimize_in_place_2(block);
1999 IRN_VRFY_IRG(block, current_ir_graph);
/* new_d_* constructors (Phi .. Mul): thin wrappers that forward to the
   matching new_rd_* constructor, supplying current_ir_graph and its
   current block (start_block for the Const variants).
   NOTE(review): sampled excerpt; closing braces and some argument
   continuation lines are not shown. */
2004 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
2006 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
2011 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
2013 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
2018 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
2020 return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
2024 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2026 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2032 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2034 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2039 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2041 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
2046 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2049 assert(arg->op == op_Cond);
2050 arg->attr.c.kind = fragmentary;
2051 arg->attr.c.default_proj = max_proj;
2052 res = new_Proj (arg, mode_X, max_proj);
2057 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2059 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2064 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2066 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2070 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2072 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2077 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2079 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2084 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2086 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2092 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2094 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2099 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2101 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2106 * allocate the frag array
/* Attach a fresh frag_arr to `res` via `frag_store`, but only while the
   graph is still being built, `res` was not optimized into a different
   op, and no frag_arr is present yet (cse may have supplied one). */
2108 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2109 if (get_opt_precise_exc_context()) {
2110 if ((current_ir_graph->phase_state == phase_building) &&
2111 (get_irn_op(res) == op) && /* Could be optimized away. */
2112 !*frag_store) /* Could be a cse where the arr is already set. */ {
2113 *frag_store = new_frag_arr(res);
/* new_d_* constructors for the fragile division ops (Quot, DivMod,
   Div, Mod): forward to new_rd_*, pin the result, and -- with precise
   exception context enabled -- attach a frag_arr.
   NOTE(review): sampled excerpt; closing braces, #endif lines and the
   memop/op argument continuations are not shown. */
2120 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2123 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2125 res->attr.except.pin_state = op_pin_state_pinned;
2126 #if PRECISE_EXC_CONTEXT
2127 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2134 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2137 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2139 res->attr.except.pin_state = op_pin_state_pinned;
2140 #if PRECISE_EXC_CONTEXT
2141 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2148 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2151 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2153 res->attr.except.pin_state = op_pin_state_pinned;
2154 #if PRECISE_EXC_CONTEXT
2155 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2162 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2165 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2167 res->attr.except.pin_state = op_pin_state_pinned;
2168 #if PRECISE_EXC_CONTEXT
2169 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* new_d_* constructors (And .. Cond): thin wrappers forwarding to the
   matching new_rd_* constructor in the current block.
   NOTE(review): sampled excerpt; closing braces and operand
   continuation lines are not shown. */
2176 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2178 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2183 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2185 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2190 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2192 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2197 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2199 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2204 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2206 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2211 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2213 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2218 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2220 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2225 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2227 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2232 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2234 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2239 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2241 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2246 new_d_Jmp (dbg_info* db)
2248 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2252 new_d_IJmp (dbg_info* db, ir_node *tgt)
2254 return new_rd_IJmp (db, current_ir_graph, current_ir_graph->current_block, tgt);
2258 new_d_Cond (dbg_info* db, ir_node *c)
2260 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* new_d_* constructors for memory/control ops (Call, Return, Raise,
   Load, Store, Alloc, Free): forward to new_rd_* in the current block;
   fragile ops (Call, Load, Store, Alloc) additionally get a frag_arr
   when precise exception context is enabled.
   NOTE(review): sampled excerpt; closing braces and #endif lines are
   not shown. */
2264 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2268 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2269 store, callee, arity, in, tp);
2270 #if PRECISE_EXC_CONTEXT
2271 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2278 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2280 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2285 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2287 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2292 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2295 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2297 #if PRECISE_EXC_CONTEXT
2298 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2305 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2308 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2310 #if PRECISE_EXC_CONTEXT
2311 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2318 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2322 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2323 store, size, alloc_type, where);
2324 #if PRECISE_EXC_CONTEXT
2325 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2332 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
2333 ir_node *size, type *free_type, where_alloc where)
2335 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2336 store, ptr, size, free_type, where);
/* new_d_* constructors (simpleSel .. Mux): thin wrappers forwarding to
   the matching new_rd_* constructor; SymConst variants use the start
   block.
   NOTE(review): sampled excerpt; closing braces and argument
   continuation lines are not shown. */
2340 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2341 /* GL: objptr was called frame before. Frame was a bad choice for the name
2342 as the operand could as well be a pointer to a dynamic object. */
2344 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2345 store, objptr, 0, NULL, ent);
2349 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2351 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2352 store, objptr, n_index, index, sel);
2356 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2358 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2359 store, objptr, ent));
2363 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2365 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2370 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2372 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2377 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2379 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2387 return _new_d_Bad();
2391 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2393 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2398 new_d_Unknown (ir_mode *m)
2400 return new_rd_Unknown(current_ir_graph, m);
2404 new_d_CallBegin (dbg_info *db, ir_node *call)
2407 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2412 new_d_EndReg (dbg_info *db)
2415 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2420 new_d_EndExcept (dbg_info *db)
2423 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2428 new_d_Break (dbg_info *db)
2430 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2434 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2436 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2443 return _new_d_NoMem();
2447 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2448 ir_node *ir_true, ir_mode *mode) {
2449 return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2450 sel, ir_false, ir_true, mode);
2453 /* ********************************************************************* */
2454 /* Comfortable interface with automatic Phi node construction. */
2455 /* (Also uses constructors of the other interfaces, except new_Block.)       */
2456 /* ********************************************************************* */
2458 /* * Block construction **/
2459 /* immature Block without predecessors */
2460 ir_node *new_d_immBlock (dbg_info* db) {
2463 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2464 /* creates a new dynamic in-array as length of in is -1 */
2465 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2466 current_ir_graph->current_block = res;
2467 res->attr.block.matured = 0;
2468 res->attr.block.dead = 0;
2469 /* res->attr.block.exc = exc_normal; */
2470 /* res->attr.block.handler_entry = 0; */
2471 res->attr.block.irg = current_ir_graph;
2472 res->attr.block.backedge = NULL;
2473 res->attr.block.in_cg = NULL;
2474 res->attr.block.cg_backedge = NULL;
2475 set_Block_block_visited(res, 0);
2477 /* Create and initialize array for Phi-node construction. */
2478 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2479 current_ir_graph->n_loc);
2480 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2482 /* Immature block may not be optimized! */
2483 IRN_VRFY_IRG(res, current_ir_graph);
2489 new_immBlock (void) {
2490 return new_d_immBlock(NULL);
2493 /* add an edge to a jmp/control flow node */
2495 add_immBlock_pred (ir_node *block, ir_node *jmp)
2497 if (block->attr.block.matured) {
2498 assert(0 && "Error: Block already matured!\n");
2501 assert(jmp != NULL);
2502 ARR_APP1(ir_node *, block->in, jmp);
2506 /* changing the current block */
2508 set_cur_block (ir_node *target)
2510 current_ir_graph->current_block = target;
2513 /* ************************ */
2514 /* parameter administration */
2516 /* get a value from the parameter array from the current block by its index */
2518 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2520 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2521 inc_irg_visited(current_ir_graph);
2523 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2525 /* get a value from the parameter array from the current block by its index */
2527 get_value (int pos, ir_mode *mode)
2529 return get_d_value(NULL, pos, mode);
2532 /* set a value at position pos in the parameter array from the current block */
2534 set_value (int pos, ir_node *value)
2536 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2537 assert(pos+1 < current_ir_graph->n_loc);
2538 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2541 /* get the current store */
2545 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2546 /* GL: one could call get_value instead */
2547 inc_irg_visited(current_ir_graph);
2548 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2551 /* set the current store */
2553 set_store (ir_node *store)
2555 /* GL: one could call set_value instead */
2556 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2557 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2561 keep_alive (ir_node *ka) {
2562 add_End_keepalive(current_ir_graph->end, ka);
2565 /** Useful access routines **/
2566 /* Returns the current block of the current graph. To set the current
2567 block use set_cur_block. */
2568 ir_node *get_cur_block() {
2569 return get_irg_current_block(current_ir_graph);
2572 /* Returns the frame type of the current graph */
2573 type *get_cur_frame_type() {
2574 return get_irg_frame_type(current_ir_graph);
2578 /* ********************************************************************* */
2581 /* call once for each run of the library */
2583 init_cons(uninitialized_local_variable_func_t *func)
2585 default_initialize_local_variable = func;
2588 /* call for each graph */
2590 irg_finalize_cons (ir_graph *irg) {
2591 irg->phase_state = phase_high;
2595 irp_finalize_cons (void) {
2596 int i, n_irgs = get_irp_n_irgs();
2597 for (i = 0; i < n_irgs; i++) {
2598 irg_finalize_cons(get_irp_irg(i));
2600 irp->phase_state = phase_high;\
2606 ir_node *new_Block(int arity, ir_node **in) {
2607 return new_d_Block(NULL, arity, in);
2609 ir_node *new_Start (void) {
2610 return new_d_Start(NULL);
2612 ir_node *new_End (void) {
2613 return new_d_End(NULL);
2615 ir_node *new_Jmp (void) {
2616 return new_d_Jmp(NULL);
2618 ir_node *new_IJmp (ir_node *tgt) {
2619 return new_d_IJmp(NULL, tgt);
2621 ir_node *new_Cond (ir_node *c) {
2622 return new_d_Cond(NULL, c);
2624 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2625 return new_d_Return(NULL, store, arity, in);
2627 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2628 return new_d_Raise(NULL, store, obj);
2630 ir_node *new_Const (ir_mode *mode, tarval *con) {
2631 return new_d_Const(NULL, mode, con);
2634 ir_node *new_Const_long(ir_mode *mode, long value)
2636 return new_d_Const_long(NULL, mode, value);
2639 ir_node *new_Const_type(tarval *con, type *tp) {
2640 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2643 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2644 return new_d_SymConst(NULL, value, kind);
2646 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2647 return new_d_simpleSel(NULL, store, objptr, ent);
2649 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2651 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2653 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2654 return new_d_InstOf (NULL, store, objptr, ent);
2656 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2658 return new_d_Call(NULL, store, callee, arity, in, tp);
2660 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2661 return new_d_Add(NULL, op1, op2, mode);
2663 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2664 return new_d_Sub(NULL, op1, op2, mode);
2666 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2667 return new_d_Minus(NULL, op, mode);
2669 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2670 return new_d_Mul(NULL, op1, op2, mode);
2672 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2673 return new_d_Quot(NULL, memop, op1, op2);
2675 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2676 return new_d_DivMod(NULL, memop, op1, op2);
2678 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2679 return new_d_Div(NULL, memop, op1, op2);
2681 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2682 return new_d_Mod(NULL, memop, op1, op2);
2684 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2685 return new_d_Abs(NULL, op, mode);
2687 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2688 return new_d_And(NULL, op1, op2, mode);
2690 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2691 return new_d_Or(NULL, op1, op2, mode);
2693 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2694 return new_d_Eor(NULL, op1, op2, mode);
2696 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2697 return new_d_Not(NULL, op, mode);
2699 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2700 return new_d_Shl(NULL, op, k, mode);
2702 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2703 return new_d_Shr(NULL, op, k, mode);
2705 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2706 return new_d_Shrs(NULL, op, k, mode);
2708 #define new_Rotate new_Rot
2709 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2710 return new_d_Rot(NULL, op, k, mode);
2712 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2713 return new_d_Cmp(NULL, op1, op2);
2715 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2716 return new_d_Conv(NULL, op, mode);
2718 ir_node *new_Cast (ir_node *op, type *to_tp) {
2719 return new_d_Cast(NULL, op, to_tp);
2721 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2722 return new_d_Phi(NULL, arity, in, mode);
2724 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2725 return new_d_Load(NULL, store, addr, mode);
2727 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2728 return new_d_Store(NULL, store, addr, val);
2730 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2731 where_alloc where) {
2732 return new_d_Alloc(NULL, store, size, alloc_type, where);
2734 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2735 type *free_type, where_alloc where) {
2736 return new_d_Free(NULL, store, ptr, size, free_type, where);
2738 ir_node *new_Sync (int arity, ir_node **in) {
2739 return new_d_Sync(NULL, arity, in);
2741 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2742 return new_d_Proj(NULL, arg, mode, proj);
2744 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2745 return new_d_defaultProj(NULL, arg, max_proj);
2747 ir_node *new_Tuple (int arity, ir_node **in) {
2748 return new_d_Tuple(NULL, arity, in);
2750 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2751 return new_d_Id(NULL, val, mode);
2753 ir_node *new_Bad (void) {
2756 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2757 return new_d_Confirm (NULL, val, bound, cmp);
2759 ir_node *new_Unknown(ir_mode *m) {
2760 return new_d_Unknown(m);
2762 ir_node *new_CallBegin (ir_node *callee) {
2763 return new_d_CallBegin(NULL, callee);
2765 ir_node *new_EndReg (void) {
2766 return new_d_EndReg(NULL);
2768 ir_node *new_EndExcept (void) {
2769 return new_d_EndExcept(NULL);
2771 ir_node *new_Break (void) {
2772 return new_d_Break(NULL);
2774 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2775 return new_d_Filter(NULL, arg, mode, proj);
2777 ir_node *new_NoMem (void) {
2778 return new_d_NoMem();
2780 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2781 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);