3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
/* NOTE(review): two alternative expansions of IRN_VRFY_IRG are visible below
   (a no-op and a call to irn_vrfy_irg).  They are presumably selected by an
   NDEBUG-style #if that is not visible in this excerpt -- confirm. */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
/* Invoked when a local variable is read before it has a definition.
   NULL means no language-specific initialization; presumably installed via a
   public setter not visible in this excerpt -- confirm. */
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* --------------------------------------------- */
66 /* private interfaces, for professional use only */
67 /* --------------------------------------------- */
69 /* Constructs a Block with a fixed number of predecessors.
70 Does not set current_block. Can not be used with automatic
71 Phi node construction. */
73 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* The block node itself has no enclosing block, hence NULL. */
77 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Blocks built through this raw constructor are complete up front:
   mark them mature and reset the visited flag. */
78 set_Block_matured(res, 1);
79 set_Block_block_visited(res, 0);
81 /* res->attr.block.exc = exc_normal; */
82 /* res->attr.block.handler_entry = 0; */
/* Fresh block: not dead, owning graph recorded, one backedge flag per
   predecessor, and all inter-procedural (cg) attributes cleared. */
83 res->attr.block.dead = 0;
84 res->attr.block.irg = irg;
85 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
86 res->attr.block.in_cg = NULL;
87 res->attr.block.cg_backedge = NULL;
88 res->attr.block.extblk = NULL;
90 IRN_VRFY_IRG(res, irg);
/* Start node: produces a mode_T tuple (projections extracted elsewhere),
   has no predecessors. */
95 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
99 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
100 /* res->attr.start.irg = irg; */
102 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 apparently selects a dynamic in-array, so that
   keep-alive edges can be appended later (see add_End_keepalive usage in
   the Phi constructor) -- confirm against new_ir_node. */
107 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
124 bool has_unknown = false;
126 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its arity must match the Phi's arity. */
128 if ( get_Block_matured(block) )
129 assert( get_irn_arity(block) == arity );
131 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
133 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown nodes; the loop body (which presumably sets
   has_unknown) is not visible in this excerpt -- confirm. */
135 for (i = arity-1; i >= 0; i--)
136 if (get_irn_op(in[i]) == op_Unknown) {
/* Phis with Unknown inputs are left unoptimized for now; they will be
   revisited once the inputs are resolved. */
141 if (!has_unknown) res = optimize_node (res);
142 IRN_VRFY_IRG(res, irg);
144 /* Memory Phis in endless loops must be kept alive.
145 As we can't distinguish these easily we keep all of them alive. */
146 if ((res->op == op_Phi) && (mode == mode_M))
147 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval 'con' with attached type 'tp'.
   NOTE: the 'block' parameter is ignored -- the node is always placed in
   the graph's start block (visible below); kept in the signature for
   interface uniformity with the other constructors. */
152 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
157 res->attr.con.tv = con;
158 set_Const_type(res, tp); /* Call method because of complex assertion. */
159 res = optimize_node (res);
/* optimize_node may CSE to an existing node; it must carry the same type. */
160 assert(get_Const_type(res) == tp);
161 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const without a specific type. */
167 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
169 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const from a host 'long', converted to a tarval. */
173 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
175 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Id node: forwards 'val' unchanged; normally folded away by optimize_node. */
179 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
183 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
184 res = optimize_node(res);
185 IRN_VRFY_IRG(res, irg);
/* Proj node: selects result number 'proj' out of the mode_T tuple 'arg'. */
190 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
195 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
196 res->attr.proj = proj;
199 assert(get_Proj_pred(res));
200 assert(get_nodes_block(get_Proj_pred(res)));
202 res = optimize_node(res);
204 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond as fragmentary and records the
   default projection number before creating an ordinary mode_X Proj. */
210 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
214 assert(arg->op == op_Cond);
215 arg->attr.c.kind = fragmentary;
216 arg->attr.c.default_proj = max_proj;
217 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv node: converts 'op' to the given mode. */
222 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
226 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
227 res = optimize_node(res);
228 IRN_VRFY_IRG(res, irg);
/* Cast node: reinterprets 'op' as type 'to_tp'; the mode is unchanged
   (taken from 'op'), only the attached high-level type differs. */
233 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
237 assert(is_atomic_type(to_tp));
239 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
240 res->attr.cast.totype = to_tp;
241 res = optimize_node(res);
242 IRN_VRFY_IRG(res, irg);
/* Tuple node: groups 'arity' values into one mode_T node. */
247 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
251 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
252 res = optimize_node (res);
253 IRN_VRFY_IRG(res, irg);
/* -- Arithmetic / logic constructors ------------------------------------
   All follow the same pattern: allocate the node, run local optimization
   (constant folding / CSE via optimize_node), verify.  The two-operand
   versions pack their operands into a local 'in' array (allocation not
   visible in this excerpt).  Quot/DivMod/Div/Mod additionally take a
   memory operand and yield a mode_T tuple (presumably memory, exception
   and result projections -- confirm against the pn_* enums). ------------ */
258 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op1, ir_node *op2, ir_mode *mode)
266 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
267 res = optimize_node(res);
268 IRN_VRFY_IRG(res, irg);
273 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
274 ir_node *op1, ir_node *op2, ir_mode *mode)
281 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
282 res = optimize_node (res);
283 IRN_VRFY_IRG(res, irg);
288 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *op, ir_mode *mode)
293 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
294 res = optimize_node(res);
295 IRN_VRFY_IRG(res, irg);
300 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *op1, ir_node *op2, ir_mode *mode)
308 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
309 res = optimize_node(res);
310 IRN_VRFY_IRG(res, irg);
315 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
325 res = optimize_node(res);
326 IRN_VRFY_IRG(res, irg);
331 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
340 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
347 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *memop, ir_node *op1, ir_node *op2)
356 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);
363 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
364 ir_node *memop, ir_node *op1, ir_node *op2)
372 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
373 res = optimize_node(res);
374 IRN_VRFY_IRG(res, irg);
379 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
380 ir_node *op1, ir_node *op2, ir_mode *mode)
387 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
388 res = optimize_node(res);
389 IRN_VRFY_IRG(res, irg);
394 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
395 ir_node *op1, ir_node *op2, ir_mode *mode)
402 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
403 res = optimize_node(res);
404 IRN_VRFY_IRG(res, irg);
409 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
410 ir_node *op1, ir_node *op2, ir_mode *mode)
417 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
418 res = optimize_node (res);
419 IRN_VRFY_IRG(res, irg);
424 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
425 ir_node *op, ir_mode *mode)
429 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
/* Shift/rotate constructors: 'k' is the shift amount. */
436 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
437 ir_node *op, ir_node *k, ir_mode *mode)
444 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
445 res = optimize_node(res);
446 IRN_VRFY_IRG(res, irg);
451 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
452 ir_node *op, ir_node *k, ir_mode *mode)
459 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
460 res = optimize_node(res);
461 IRN_VRFY_IRG(res, irg);
466 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
467 ir_node *op, ir_node *k, ir_mode *mode)
474 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
475 res = optimize_node(res);
476 IRN_VRFY_IRG(res, irg);
481 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
482 ir_node *op, ir_node *k, ir_mode *mode)
489 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
490 res = optimize_node(res);
491 IRN_VRFY_IRG(res, irg);
496 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
497 ir_node *op, ir_mode *mode)
501 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
502 res = optimize_node (res);
503 IRN_VRFY_IRG(res, irg);
/* Cmp yields a mode_T node; individual relations are read via Projs. */
508 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
509 ir_node *op1, ir_node *op2)
516 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
517 res = optimize_node(res);
518 IRN_VRFY_IRG(res, irg);
/* Unconditional jump: control-flow (mode_X) node with no data inputs. */
523 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
527 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
528 res = optimize_node (res);
529 IRN_VRFY_IRG (res, irg);
/* Indirect jump to computed target 'tgt'. */
534 new_rd_IJmp (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *tgt)
538 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
539 res = optimize_node (res);
540 IRN_VRFY_IRG (res, irg);
/* optimize_node may have replaced the IJmp; the follow-up action for the
   surviving-IJmp case is not visible in this excerpt. */
542 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* Cond: mode_T node over selector 'c'; starts as a dense jump table with
   default projection 0 (may later be marked fragmentary, see
   new_rd_defaultProj). */
548 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
552 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
553 res->attr.c.kind = dense;
554 res->attr.c.default_proj = 0;
555 res = optimize_node (res);
556 IRN_VRFY_IRG(res, irg);
/* Call node: packs memory state and callee in front of the arguments
   (arguments are copied starting at r_in[2], so slots 0/1 presumably hold
   store and callee -- the assignments are not visible in this excerpt). */
561 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
562 ir_node *callee, int arity, ir_node **in, type *tp)
569 NEW_ARR_A(ir_node *, r_in, r_arity);
572 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
574 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type placeholder). */
576 assert((get_unknown_type() == tp) || is_Method_type(tp));
577 set_Call_type(res, tp);
578 res->attr.call.exc.pin_state = op_pin_state_pinned;
579 res->attr.call.callee_arr = NULL;
580 res = optimize_node(res);
581 IRN_VRFY_IRG(res, irg);
/* Return node: memory state first (r_in[0], assignment not visible), then
   the returned values starting at r_in[1]. */
586 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
587 ir_node *store, int arity, ir_node **in)
594 NEW_ARR_A (ir_node *, r_in, r_arity);
596 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
597 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
598 res = optimize_node(res);
599 IRN_VRFY_IRG(res, irg);
/* Raise node: throws 'obj' given memory state 'store'. */
604 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
611 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
/* Load node: mode_T tuple over (store, adr); the mode of the loaded value
   is recorded as an attribute.  Loads start pinned and non-volatile. */
618 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
619 ir_node *store, ir_node *adr, ir_mode *mode)
626 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
627 res->attr.load.exc.pin_state = op_pin_state_pinned;
628 res->attr.load.load_mode = mode;
629 res->attr.load.volatility = volatility_non_volatile;
630 res = optimize_node(res);
631 IRN_VRFY_IRG(res, irg);
/* Store node: mode_T tuple over (store, adr, val); pinned, non-volatile. */
636 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
637 ir_node *store, ir_node *adr, ir_node *val)
645 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
646 res->attr.store.exc.pin_state = op_pin_state_pinned;
647 res->attr.store.volatility = volatility_non_volatile;
648 res = optimize_node(res);
649 IRN_VRFY_IRG(res, irg);
/* Alloc node: allocates 'size' bytes of 'alloc_type' on heap or stack
   (selected by 'where'); pinned because it touches memory state. */
654 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
655 ir_node *size, type *alloc_type, where_alloc where)
662 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
663 res->attr.a.exc.pin_state = op_pin_state_pinned;
664 res->attr.a.where = where;
665 res->attr.a.type = alloc_type;
666 res = optimize_node(res);
667 IRN_VRFY_IRG(res, irg);
/* Free node: releases 'ptr' of 'free_type'; produces only the new memory
   state (mode_M), hence no exception projection. */
672 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
673 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
681 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
682 res->attr.f.where = where;
683 res->attr.f.type = free_type;
684 res = optimize_node(res);
685 IRN_VRFY_IRG(res, irg);
/* Sel node: selects entity 'ent' relative to 'objptr', with optional array
   indices in 'in'.  Memory state and object pointer are packed ahead of the
   indices (copied to r_in[2]; slot assignments not visible here). */
690 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
691 int arity, ir_node **in, entity *ent)
697 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
700 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
703 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
705 * FIXME: Sel's can select functions which should be of mode mode_P_code.
707 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
708 res->attr.s.ent = ent;
709 res = optimize_node(res);
710 IRN_VRFY_IRG(res, irg);
/* InstOf node: runtime type test of 'objptr' against 'ent'. */
715 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
716 ir_node *objptr, type *ent)
723 NEW_ARR_A(ir_node *, r_in, r_arity);
/* NOTE(review): the node is constructed with op_Sel although this is the
   InstOf constructor and the attribute written below is attr.io --
   op_InstOf appears to be intended.  Confirm before changing; the missing
   lines of this excerpt might tell a different story. */
727 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
728 res->attr.io.ent = ent;
730 /* res = optimize(res); */
731 IRN_VRFY_IRG(res, irg);
/* SymConst node: symbolic constant (address, type tag, or type size).
   Address kinds get a data-pointer mode; the mode for the remaining kinds
   is chosen in lines not visible in this excerpt. */
736 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
737 symconst_kind symkind, type *tp) {
741 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
742 mode = mode_P_data; /* FIXME: can be mode_P_code */
746 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
748 res->attr.i.num = symkind;
749 res->attr.i.sym = value;
752 res = optimize_node(res);
753 IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: SymConst without a specific type. */
758 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
759 symconst_kind symkind)
761 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* The four helpers below build the symconst_symbol union and place the node
   in the start block.  The (type *) casts exist because C89 union
   initializers can only initialize the first member -- presumably
   type_p is that member; confirm against the union declaration. */
765 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
766 symconst_symbol sym = {(type *)symbol};
767 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
770 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
771 symconst_symbol sym = {(type *)symbol};
772 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
775 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
776 symconst_symbol sym = {symbol};
777 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
780 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
781 symconst_symbol sym = {symbol};
782 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Sync node: joins several memory states into one (mode_M). */
786 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
790 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
/* Bad node: body not visible in this excerpt; presumably returns the
   graph's unique Bad node -- confirm. */
797 new_rd_Bad (ir_graph *irg)
/* Confirm node: asserts that 'val' stands in relation 'cmp' to 'bound';
   carries the mode of 'val'. */
803 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
805 ir_node *in[2], *res;
809 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
810 res->attr.confirm_cmp = cmp;
811 res = optimize_node (res);
812 IRN_VRFY_IRG(res, irg);
/* Unknown node: placed in the start block; intentionally not optimized
   or verified here. */
817 new_rd_Unknown (ir_graph *irg, ir_mode *m)
819 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* CallBegin node (interprocedural view): references the callee pointer of
   the original Call and remembers that Call in its attributes. */
823 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
828 in[0] = get_Call_ptr(call);
829 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
830 /* res->attr.callbegin.irg = irg; */
831 res->attr.callbegin.call = call;
832 res = optimize_node(res);
833 IRN_VRFY_IRG(res, irg);
/* EndReg / EndExcept: interprocedural end nodes with dynamic arity (-1),
   mirroring new_rd_End. */
838 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
842 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
844 IRN_VRFY_IRG(res, irg);
849 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
853 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
854 irg->end_except = res;
855 IRN_VRFY_IRG (res, irg);
/* Break node: control flow (mode_X) with no inputs. */
860 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
864 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
865 res = optimize_node(res);
866 IRN_VRFY_IRG(res, irg);
/* Filter node: interprocedural analogue of Proj; cg attributes start
   empty and are filled in by the interprocedural view construction. */
871 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
876 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
877 res->attr.filter.proj = proj;
878 res->attr.filter.in_cg = NULL;
879 res->attr.filter.backedge = NULL;
882 assert(get_Proj_pred(res));
883 assert(get_nodes_block(get_Proj_pred(res)));
885 res = optimize_node(res);
886 IRN_VRFY_IRG(res, irg);
/* NoMem node: body not visible; presumably returns the graph's unique
   NoMem node -- confirm. */
891 new_rd_NoMem (ir_graph *irg) {
/* Mux node: selects ir_true or ir_false depending on 'sel'. */
896 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
897 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
906 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
909 res = optimize_node(res);
910 IRN_VRFY_IRG(res, irg);
/* -- new_r_* wrapper family ---------------------------------------------
   Convenience constructors without debug information: each simply forwards
   to the corresponding new_rd_* constructor with db == NULL. ------------ */
915 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
916 return new_rd_Block(NULL, irg, arity, in);
918 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
919 return new_rd_Start(NULL, irg, block);
921 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
922 return new_rd_End(NULL, irg, block);
924 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
925 return new_rd_Jmp(NULL, irg, block);
927 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
928 return new_rd_IJmp(NULL, irg, block, tgt);
930 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
931 return new_rd_Cond(NULL, irg, block, c);
933 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
934 ir_node *store, int arity, ir_node **in) {
935 return new_rd_Return(NULL, irg, block, store, arity, in);
937 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
938 ir_node *store, ir_node *obj) {
939 return new_rd_Raise(NULL, irg, block, store, obj);
941 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
942 ir_mode *mode, tarval *con) {
943 return new_rd_Const(NULL, irg, block, mode, con);
946 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
947 ir_mode *mode, long value) {
948 return new_rd_Const_long(NULL, irg, block, mode, value);
951 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
952 ir_mode *mode, tarval *con, type *tp) {
953 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
956 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
957 symconst_symbol value, symconst_kind symkind) {
958 return new_rd_SymConst(NULL, irg, block, value, symkind);
960 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
961 ir_node *objptr, int n_index, ir_node **index,
963 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
965 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
967 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
969 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
970 ir_node *callee, int arity, ir_node **in,
972 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
974 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
975 ir_node *op1, ir_node *op2, ir_mode *mode) {
976 return new_rd_Add(NULL, irg, block, op1, op2, mode);
978 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
979 ir_node *op1, ir_node *op2, ir_mode *mode) {
980 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
982 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
983 ir_node *op, ir_mode *mode) {
984 return new_rd_Minus(NULL, irg, block, op, mode);
986 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
987 ir_node *op1, ir_node *op2, ir_mode *mode) {
988 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
990 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
991 ir_node *memop, ir_node *op1, ir_node *op2) {
992 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
994 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
995 ir_node *memop, ir_node *op1, ir_node *op2) {
996 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
998 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
999 ir_node *memop, ir_node *op1, ir_node *op2) {
1000 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1002 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1003 ir_node *memop, ir_node *op1, ir_node *op2) {
1004 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1006 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1007 ir_node *op, ir_mode *mode) {
1008 return new_rd_Abs(NULL, irg, block, op, mode);
1010 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1011 ir_node *op1, ir_node *op2, ir_mode *mode) {
1012 return new_rd_And(NULL, irg, block, op1, op2, mode);
1014 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1015 ir_node *op1, ir_node *op2, ir_mode *mode) {
1016 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1018 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1019 ir_node *op1, ir_node *op2, ir_mode *mode) {
1020 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1022 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1023 ir_node *op, ir_mode *mode) {
1024 return new_rd_Not(NULL, irg, block, op, mode);
1026 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1027 ir_node *op1, ir_node *op2) {
1028 return new_rd_Cmp(NULL, irg, block, op1, op2);
1030 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1031 ir_node *op, ir_node *k, ir_mode *mode) {
1032 return new_rd_Shl(NULL, irg, block, op, k, mode);
1034 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1035 ir_node *op, ir_node *k, ir_mode *mode) {
1036 return new_rd_Shr(NULL, irg, block, op, k, mode);
1038 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1039 ir_node *op, ir_node *k, ir_mode *mode) {
1040 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1042 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1043 ir_node *op, ir_node *k, ir_mode *mode) {
1044 return new_rd_Rot(NULL, irg, block, op, k, mode);
1046 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1047 ir_node *op, ir_mode *mode) {
1048 return new_rd_Conv(NULL, irg, block, op, mode);
1050 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1051 return new_rd_Cast(NULL, irg, block, op, to_tp);
1053 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1054 ir_node **in, ir_mode *mode) {
1055 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1057 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1058 ir_node *store, ir_node *adr, ir_mode *mode) {
1059 return new_rd_Load(NULL, irg, block, store, adr, mode);
1061 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1062 ir_node *store, ir_node *adr, ir_node *val) {
1063 return new_rd_Store(NULL, irg, block, store, adr, val);
1065 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1066 ir_node *size, type *alloc_type, where_alloc where) {
1067 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1069 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1070 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1071 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1073 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1074 return new_rd_Sync(NULL, irg, block, arity, in);
1076 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1077 ir_mode *mode, long proj) {
1078 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1080 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1082 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1084 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1085 int arity, ir_node **in) {
1086 return new_rd_Tuple(NULL, irg, block, arity, in );
1088 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1089 ir_node *val, ir_mode *mode) {
1090 return new_rd_Id(NULL, irg, block, val, mode);
1092 ir_node *new_r_Bad (ir_graph *irg) {
1093 return new_rd_Bad(irg);
1095 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1096 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1098 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1099 return new_rd_Unknown(irg, m);
1101 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1102 return new_rd_CallBegin(NULL, irg, block, callee);
1104 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1105 return new_rd_EndReg(NULL, irg, block);
1107 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1108 return new_rd_EndExcept(NULL, irg, block);
1110 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1111 return new_rd_Break(NULL, irg, block);
1113 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1114 ir_mode *mode, long proj) {
1115 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1117 ir_node *new_r_NoMem (ir_graph *irg) {
1118 return new_rd_NoMem(irg);
1120 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1121 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1122 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1126 /** ********************/
1127 /** public interfaces */
1128 /** construction tools */
/* The new_d_* constructors operate on current_ir_graph and its
   current_block (automatic Phi construction interface). */
1132 * - create a new Start node in the current block
1134 * @return s - pointer to the created Start node
1139 new_d_Start (dbg_info* db)
1143 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1144 op_Start, mode_T, 0, NULL);
1145 /* res->attr.start.irg = current_ir_graph; */
1147 res = optimize_node(res);
1148 IRN_VRFY_IRG(res, current_ir_graph);
/* End node in the current block of current_ir_graph (dynamic arity, see
   new_rd_End). */
1153 new_d_End (dbg_info* db)
1156 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1157 op_End, mode_X, -1, NULL);
1158 res = optimize_node(res);
1159 IRN_VRFY_IRG(res, current_ir_graph);
1164 /* Constructs a Block with a fixed number of predecessors.
1165 Does set current_block. Can be used with automatic Phi
1166 node construction. */
1168 new_d_Block (dbg_info* db, int arity, ir_node **in)
1172 bool has_unknown = false;
1174 res = new_rd_Block(db, current_ir_graph, arity, in);
1176 /* Create and initialize array for Phi-node construction. */
/* During phase_building each block carries a zeroed graph_arr with one
   slot per local variable (n_loc); get_value/set_value index into it. */
1177 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1178 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1179 current_ir_graph->n_loc);
1180 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* As in new_rd_Phi: detect Unknown predecessors (loop body that sets
   has_unknown is not visible in this excerpt) and skip optimization then. */
1183 for (i = arity-1; i >= 0; i--)
1184 if (get_irn_op(in[i]) == op_Unknown) {
1189 if (!has_unknown) res = optimize_node(res);
1190 current_ir_graph->current_block = res;
1192 IRN_VRFY_IRG(res, current_ir_graph);
1197 /* ***********************************************************************/
1198 /* Methods necessary for automatic Phi node creation */
1200 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1201 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1202 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1203 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1205 Call Graph: ( A ---> B == A "calls" B)
1207 get_value mature_immBlock
1215 get_r_value_internal |
1219 new_rd_Phi0 new_rd_Phi_in
1221 * *************************************************************************** */
1223 /** Creates a Phi node with 0 predecessors */
1224 static INLINE ir_node *
/* Placeholder Phi for a value in an immature block; predecessors are
   filled in when the block matures.  Deliberately not run through
   optimize_node. */
1225 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1229 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1230 IRN_VRFY_IRG(res, irg);
1234 /* There are two implementations of the Phi node construction. The first
1235 is faster, but does not work for blocks with more than 2 predecessors.
1236 The second works always but is slower and causes more unnecessary Phi
1238 Select the implementations by the following preprocessor flag set in
1240 #if USE_FAST_PHI_CONSTRUCTION
1242 /* This is a stack used for allocating and deallocating nodes in
1243 new_rd_Phi_in. The original implementation used the obstack
1244 to model this stack, now it is explicit. This reduces side effects.
1246 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate a fresh, empty Phi_in stack (growable array of ir_node*). */
1248 new_Phi_in_stack(void) {
1251 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1253 res->stack = NEW_ARR_F (ir_node *, 0);
/* Release the stack's array; presumably the struct itself is freed in a
   line not visible here -- confirm (malloc above must be paired). */
1260 free_Phi_in_stack(Phi_in_stack *s) {
1261 DEL_ARR_F(s->stack);
/* Push a discarded Phi node onto the stack for later reuse: append when
   the stack is full, otherwise overwrite the slot at 'pos'. */
1265 free_to_Phi_in_stack(ir_node *phi) {
1266 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1267 current_ir_graph->Phi_in_stack->pos)
1268 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1270 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1272 (current_ir_graph->Phi_in_stack->pos)++;
1275 static INLINE ir_node *
/* Pop a reusable Phi node from the stack, or allocate a new one if the
   stack is empty.  NOTE(review): operates on current_ir_graph's stack even
   though 'irg' is passed -- inconsistent if they can differ; confirm. */
1276 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1277 int arity, ir_node **in) {
1279 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1280 int pos = current_ir_graph->Phi_in_stack->pos;
1284 /* We need to allocate a new node */
/* NOTE(review): 'db' is referenced here but is not among the visible
   parameters of this function -- either a local declared in a missing
   line, or this should be NULL; confirm. */
1285 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1286 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1288 /* reuse the old node and initialize it again. */
1291 assert (res->kind == k_ir_node);
1292 assert (res->op == op_Phi);
1296 assert (arity >= 0);
1297 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Re-build the in array: slot 0 is reserved (block pointer convention),
   predecessors go to slots 1..arity. */
1298 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1300 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1302 (current_ir_graph->Phi_in_stack->pos)--;
1306 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1308 /* Creates a Phi node with a given, fixed array **in of predecessors.
1309 If the Phi node is unnecessary, as the same value reaches the block
1310 through all control flow paths, it is eliminated and the value
1311 returned directly. This constructor is only intended for use in
1312 the automatic Phi node generation triggered by get_value or mature.
1313 The implementation is quite tricky and depends on the fact, that
1314 the nodes are allocated on a stack:
1315 The in array contains predecessors and NULLs. The NULLs appear,
1316 if get_r_value_internal, that computed the predecessors, reached
1317 the same block on two paths. In this case the same value reaches
1318 this block on both paths, there is no definition in between. We need
1319 not allocate a Phi where these path's merge, but we have to communicate
1320 this fact to the caller. This happens by returning a pointer to the
1321 node the caller _will_ allocate. (Yes, we predict the address. We can
1322 do so because the nodes are allocated on the obstack.) The caller then
1323 finds a pointer to itself and, when this routine is called again,
1326 static INLINE ir_node *
1327 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1330 ir_node *res, *known;
1332 /* Allocate a new node on the obstack. This can return a node to
1333 which some of the pointers in the in-array already point.
1334 Attention: the constructor copies the in array, i.e., the later
1335 changes to the array in this routine do not affect the
1336 constructed node! If the in array contains NULLs, there will be
1337 missing predecessors in the returned node. Is this a possible
1338 internal state of the Phi node generation? */
1339 #if USE_EXPLICIT_PHI_IN_STACK
1340 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1342 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1343 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1346 /* The in-array can contain NULLs. These were returned by
1347 get_r_value_internal if it reached the same block/definition on a
1348 second path. The NULLs are replaced by the node itself to
1349 simplify the test in the next loop. */
1350 for (i = 0; i < ins; ++i) {
1355 /* This loop checks whether the Phi has more than one predecessor.
1356 If so, it is a real Phi node and we break the loop. Else the Phi
1357 node merges the same definition on several paths and therefore is
1359 for (i = 0; i < ins; ++i) {
1360 if (in[i] == res || in[i] == known)
1369 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1371 #if USE_EXPLICIT_PHI_IN_STACK
1372 free_to_Phi_in_stack(res);
/* Without the explicit stack the speculative node is returned to the
   obstack (it was the most recent allocation), after unregistering it
   from the edge data structure. */
1374 edges_node_deleted(res, current_ir_graph);
1375 obstack_free(current_ir_graph->obst, res);
1379 res = optimize_node (res);
1380 IRN_VRFY_IRG(res, irg);
1383 /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: phi_merge and get_r_value_internal are mutually
   recursive. */
1388 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1391    allocates and returns this node.  The routine called to allocate the
1392    node might optimize it away and return a real value, or even a pointer
1393    to a deallocated Phi node on top of the obstack!
1394    This function is called with an in-array of proper size. **/
1396 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1398   ir_node *prevBlock, *res;
1401   /* This loop goes to all predecessor blocks of the block the Phi node is in
1402      and there finds the operands of the Phi node by calling
1403      get_r_value_internal. */
   /* NOTE(review): iteration starts at 1 — block->in[0] appears to be
      reserved (not a control-flow predecessor); confirm against ir_node
      layout. */
1404   for (i = 1;  i <= ins;  ++i) {
1405     assert (block->in[i]);
1406     prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1408     nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1411   /* After collecting all predecessors into the array nin a new Phi node
1412      with these predecessors is created.  This constructor contains an
1413      optimization: If all predecessors of the Phi node are identical it
1414      returns the only operand instead of a new Phi node.  If the value
1415      passes two different control flow edges without being defined, and
1416      this is the second path treated, a pointer to the node that will be
1417      allocated for the first path (recursion) is returned.  We already
1418      know the address of this node, as it is the next node to be allocated
1419      and will be placed on top of the obstack. (The obstack is a _stack_!) */
1420   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1422   /* Now we know the value for "pos" and can enter it in the array with
1423      all known local variables.  Attention: this might be a pointer to
1424      a node, that later will be allocated!!! See new_rd_Phi_in.
1425      If this is called in mature, after some set_value in the same block,
1426      the proper value must not be overwritten:
1428      get_value    (makes Phi0, puts it into graph_arr)
1429      set_value    (overwrites Phi0 in graph_arr)
1430      mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
   /* Only record res if no later set_value already stored a value here. */
1433   if (!block->attr.block.graph_arr[pos]) {
1434     block->attr.block.graph_arr[pos] = res;
1436     /*  printf(" value already computed by %s\n",
1437         get_id_str(block->attr.block.graph_arr[pos]->op->name));  */
1443 /* This function returns the last definition of a variable.  In case
1444    this variable was last defined in a previous block, Phi nodes are
1445    inserted.  If the part of the firm graph containing the definition
1446    is not yet constructed, a dummy Phi node is returned. */
1448 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1451   /* There are 4 cases to treat.
1453      1. The block is not mature and we visit it the first time.  We can not
1454         create a proper Phi node, therefore a Phi0, i.e., a Phi without
1455         predecessors is returned.  This node is added to the linked list (field
1456         "link") of the containing block to be completed when this block is
1457         matured. (Completion will add a new Phi and turn the Phi0 into an Id
1460      2. The value is already known in this block, graph_arr[pos] is set and we
1461         visit the block the first time.  We can return the value without
1462         creating any new nodes.
1464      3. The block is mature and we visit it the first time.  A Phi node needs
1465         to be created (phi_merge).  If the Phi is not needed, as all its
1466         operands are the same value reaching the block through different
1467         paths, it's optimized away and the value itself is returned.
1469      4. The block is mature, and we visit it the second time.  Now two
1470         subcases are possible:
1471         * The value was computed completely the last time we were here.  This
1472           is the case if there is no loop.  We can return the proper value.
1473         * The recursion that visited this node and set the flag did not
1474           return yet.  We are computing a value in a loop and need to
1475           break the recursion without knowing the result yet.
1476      @@@ strange case.  Straight forward we would create a Phi before
1477      starting the computation of its predecessors.  In this case we will
1478      find a Phi here in any case.  The problem is that this implementation
1479      only creates a Phi after computing the predecessors, so that it is
1480      hard to compute self references of this Phi.  @@@
1481        There is no simple check for the second subcase.  Therefore we check
1482        for a second visit and treat all such cases as the second subcase.
1483        Anyways, the basic situation is the same:  we reached a block
1484        on two paths without finding a definition of the value:  No Phi
1485        nodes are needed on both paths.
1486        We return this information "Two paths, no Phi needed" by a very tricky
1487        implementation that relies on the fact that an obstack is a stack and
1488        will return a node with the same address on different allocations.
1489        Look also at phi_merge and new_rd_phi_in to understand this.
1490      @@@ Unfortunately this does not work, see testprogram
1491      three_cfpred_example.
1495   /* case 4 -- already visited. */
   /* The NULL return is the "two paths, no Phi needed" signal consumed by
      new_rd_Phi_in (NULL entries in its in-array). */
1496   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1498   /* visited the first time */
1499   set_irn_visited(block, get_irg_visited(current_ir_graph));
1501   /* Get the local valid value */
1502   res = block->attr.block.graph_arr[pos];
1504   /* case 2 -- If the value is actually computed, return it. */
1505   if (res) return res;
1507   if (block->attr.block.matured) { /* case 3 */
1509     /* The Phi has the same amount of ins as the corresponding block. */
1510     int ins = get_irn_arity(block);
1512     NEW_ARR_A (ir_node *, nin, ins);
1514     /* Phi merge collects the predecessors and then creates a node. */
1515     res = phi_merge (block, pos, mode, nin, ins);
1517   } else {  /* case 1 */
1518     /* The block is not mature, we don't know how many in's are needed.  A Phi
1519        with zero predecessors is created.  Such a Phi node is called Phi0
1520        node.  (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1521        to the list of Phi0 nodes in this block to be matured by mature_immBlock
1523        The Phi0 has to remember the pos of its internal value.  If the real
1524        Phi is computed, pos is used to update the array with the local
1527     res = new_rd_Phi0 (current_ir_graph, block, mode);
1528     res->attr.phi0_pos = pos;
     /* Link the Phi0 into the block's chain of pending Phi0s (field link). */
1529     res->link = block->link;
1533   /* If we get here, the frontend missed a use-before-definition error */
1536     printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
1537     assert (mode->code >= irm_F && mode->code <= irm_P);
1538     res = new_rd_Const (NULL, current_ir_graph, block, mode,
1539                tarval_mode_null[mode->code]);
1542   /* The local valid value is available now. */
1543   block->attr.block.graph_arr[pos] = res;
1551    it starts the recursion.  This causes an Id at the entry of
1552    every block that has no definition of the value! **/
1554 #if USE_EXPLICIT_PHI_IN_STACK
/* Stub implementations keeping the Phi_in_stack interface alive.
   NOTE(review): the stubs appear right after the #if — presumably these
   belong to the branch where the explicit stack is NOT used (the matching
   #else/#endif lines are not visible here); confirm against the full file. */
1556 Phi_in_stack * new_Phi_in_stack() {  return NULL; }
1557 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Build a Phi node in block, eliminating it if redundant.  This variant
   additionally gets the Phi0 placeholder (phi0) that the result will
   replace, so self references through the Phi0 can be fixed up early. */
1560 static INLINE ir_node *
1561 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1562        ir_node **in, int ins, ir_node *phi0)
1565   ir_node *res, *known;
1567   /* Allocate a new node on the obstack.  The allocation copies the in
1569   res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1570   res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1572   /* This loop checks whether the Phi has more than one predecessor.
1573      If so, it is a real Phi node and we break the loop.  Else the
1574      Phi node merges the same definition on several paths and therefore
1575      is not needed.  Don't consider Bad nodes! */
1577   for (i=0;  i < ins;  ++i)
1581     in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */
1583     /* Optimize self referencing Phis:  We can't detect them yet properly, as
1584        they still refer to the Phi0 they will replace.  So replace right now. */
1585     if (phi0 && in[i] == phi0) in[i] = res;
1587     if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1595   /* i==ins: there is at most one predecessor, we don't need a phi node. */
   /* Free the speculative Phi again; it is the topmost obstack allocation. */
1598     edges_node_deleted(res, current_ir_graph);
1599     obstack_free (current_ir_graph->obst, res);
1600     if (is_Phi(known)) {
1601       /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1602      order, an enclosing Phi node may get superfluous. */
1603       res = optimize_in_place_2(known);
1605     exchange(known, res);
1611     /* An undefined value, e.g., in unreachable code. */
1615     res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
1616     IRN_VRFY_IRG(res, irg);
1617     /* Memory Phis in endless loops must be kept alive.
1618        As we can't distinguish these easily we keep all of them alive. */
1619     if ((res->op == op_Phi) && (mode == mode_M))
1620       add_End_keepalive(irg->end, res);
/* Forward declarations for the mutual recursion below. */
1627 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1629 #if PRECISE_EXC_CONTEXT
1631 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1633 /* Construct a new frag_array for node n.
1634    Copy the content from the current graph_arr of the corresponding block:
1635    this is the current state.
1636    Set ProjM(n) as current memory state.
1637    Further the last entry in frag_arr of current block points to n.  This
1638    constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1640 static INLINE ir_node ** new_frag_arr (ir_node *n)
1645   arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
   /* Snapshot the current block's local-value array into the new frag array. */
1646   memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1647      sizeof(ir_node *)*current_ir_graph->n_loc);
1649   /* turn off optimization before allocating Proj nodes, as res isn't
1651   opt = get_opt_optimize(); set_optimize(0);
1652   /* Here we rely on the fact that all frag ops have Memory as first result! */
1653   if (get_irn_op(n) == op_Call)
1654     arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
     /* All other fragile ops share one memory Proj number; the asserts
        below pin that assumption at compile/run time. */
1656     assert((pn_Quot_M == pn_DivMod_M) &&
1657        (pn_Quot_M == pn_Div_M)    &&
1658        (pn_Quot_M == pn_Mod_M)    &&
1659        (pn_Quot_M == pn_Load_M)   &&
1660        (pn_Quot_M == pn_Store_M)  &&
1661        (pn_Quot_M == pn_Alloc_M)  );
1662     arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
   /* Chain: the last graph_arr slot of the current block points to n. */
1666   current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1671  * returns the frag_arr from a node
1673 static INLINE ir_node **
1674 get_frag_arr (ir_node *n) {
   /* Dispatch on the opcode to find where the frag array lives in the
      node's attribute union.  (The case labels are on the elided lines;
      presumably iro_Call, iro_Alloc, iro_Load, iro_Store and a default
      fragile-op case — confirm against the full file.) */
1675   switch (get_irn_opcode(n)) {
1677     return n->attr.call.exc.frag_arr;
1679     return n->attr.a.exc.frag_arr;
1681     return n->attr.load.exc.frag_arr;
1683     return n->attr.store.exc.frag_arr;
1685     return n->attr.except.frag_arr;
/* Record val for position pos in frag_arr if not already set, then
   propagate the value along the chain of fragile ops (each frag array's
   last slot links to the next fragile op, see new_frag_arr).
   Two variants are visible: a recursive one and a bounded loop that
   flattens the recursion (the #if/#else lines are elided here). */
1690 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1692   if (!frag_arr[pos]) frag_arr[pos] = val;
1693   if (frag_arr[current_ir_graph->n_loc - 1]) {
1694     ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1695     assert(arr != frag_arr && "Endless recursion detected");
1696     set_frag_value(arr, pos, val);
   /* Iterative variant: cap the chain walk at 1000 links to catch cycles. */
1701   for (i = 0; i < 1000; ++i) {
1702     if (!frag_arr[pos]) {
1703       frag_arr[pos] = val;
1705     if (frag_arr[current_ir_graph->n_loc - 1]) {
1706       ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1712   assert(0 && "potential endless recursion");
/* Look up the value at pos as seen after the fragile op cfOp; falls back
   to the normal lookup (get_r_value_internal) when the frag array has no
   entry and builds a Phi when a later set_value shadowed the cfOp state. */
1717 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1721   assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1723   frag_arr = get_frag_arr(cfOp);
1724   res = frag_arr[pos];
1726     if (block->attr.block.graph_arr[pos]) {
1727       /* There was a set_value after the cfOp and no get_value before that
1728          set_value.  We must build a Phi node now. */
1729       if (block->attr.block.matured) {
1730         int ins = get_irn_arity(block);
1732         NEW_ARR_A (ir_node *, nin, ins);
1733         res = phi_merge(block, pos, mode, nin, ins);
       /* Immature block: record a Phi0 placeholder, chained via link. */
1735         res = new_rd_Phi0 (current_ir_graph, block, mode);
1736         res->attr.phi0_pos = pos;
1737         res->link = block->link;
1741       /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1742          but this should be better: (remove comment if this works) */
1743       /* It's a Phi, we can write this into all graph_arrs with NULL */
1744       set_frag_value(block->attr.block.graph_arr, pos, res);
1746       res = get_r_value_internal(block, pos, mode);
1747       set_frag_value(block->attr.block.graph_arr, pos, res);
1755    computes the predecessors for the real phi node, and then
1756    allocates and returns this node.  The routine called to allocate the
1757    node might optimize it away and return a real value.
1758    This function must be called with an in-array of proper size. **/
1760 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1762   ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1765   /* If this block has no value at pos create a Phi0 and remember it
1766      in graph_arr to break recursions.
1767      Else we may not set graph_arr as a later value is remembered there. */
1769   if (!block->attr.block.graph_arr[pos]) {
1770     if (block == get_irg_start_block(current_ir_graph)) {
1771       /* Collapsing to Bad tarvals is no good idea.
1772          So we call a user-supplied routine here that deals with this case as
1773          appropriate for the given language. Sadly the only help we can give
1774          here is the position.
1776          Even if all variables are defined before use, it can happen that
1777          we get to the start block, if a Cond has been replaced by a tuple
1778          (bad, jmp).  In this case we call the function needlessly, eventually
1779          generating a non-existent error.
1780          However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1783       if (default_initialize_local_variable)
       /* Callback gets the 0-based variable index, hence pos - 1. */
1784     block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1786     block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1787       /* We don't need to care about exception ops in the start block.
1788      There are none by definition. */
1789       return block->attr.block.graph_arr[pos];
     /* Non-start block: plant a Phi0 placeholder to break recursion. */
1791       phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1792       block->attr.block.graph_arr[pos] = phi0;
1793 #if PRECISE_EXC_CONTEXT
1794       if (get_opt_precise_exc_context()) {
1795     /* Set graph_arr for fragile ops.  Also here we should break recursion.
1796        We could choose a cyclic path through an cfop.  But the recursion would
1797        break at some point. */
1798     set_frag_value(block->attr.block.graph_arr, pos, phi0);
1804   /* This loop goes to all predecessor blocks of the block the Phi node
1805      is in and there finds the operands of the Phi node by calling
1806      get_r_value_internal.  */
1807   for (i = 1;  i <= ins;  ++i) {
1808     prevCfOp = skip_Proj(block->in[i]);
1810     if (is_Bad(prevCfOp)) {
1811       /* In case a Cond has been optimized we would get right to the start block
1812      with an invalid definition. */
1813       nin[i-1] = new_Bad();
1816     prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1818     if (!is_Bad(prevBlock)) {
1819 #if PRECISE_EXC_CONTEXT
1820       if (get_opt_precise_exc_context() &&
1821       is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
       /* NOTE(review): the assert duplicates the lookup done on the next
          line; it only checks the value is non-NULL before using it. */
1822     assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1823     nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1826     nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1828       nin[i-1] = new_Bad();
1832   /* We want to pass the Phi0 node to the constructor: this finds additional
1833      optimization possibilities.
1834      The Phi0 node either is allocated in this function, or it comes from
1835      a former call to get_r_value_internal.  In this case we may not yet
1836      exchange phi0, as this is done in mature_immBlock. */
1838     phi0_all = block->attr.block.graph_arr[pos];
   /* Only treat phi0_all as a Phi0 placeholder if it really is a 0-arity
      Phi that belongs to this block. */
1839     if (!((get_irn_op(phi0_all) == op_Phi) &&
1840       (get_irn_arity(phi0_all) == 0)   &&
1841       (get_nodes_block(phi0_all) == block)))
1847   /* After collecting all predecessors into the array nin a new Phi node
1848      with these predecessors is created.  This constructor contains an
1849      optimization: If all predecessors of the Phi node are identical it
1850      returns the only operand instead of a new Phi node.  */
1851   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1853   /* In case we allocated a Phi0 node at the beginning of this procedure,
1854      we need to exchange this Phi0 with the real Phi. */
1856     exchange(phi0, res);
1857     block->attr.block.graph_arr[pos] = res;
1858     /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
1859        only an optimization. */
1865 /* This function returns the last definition of a variable.  In case
1866    this variable was last defined in a previous block, Phi nodes are
1867    inserted.  If the part of the firm graph containing the definition
1868    is not yet constructed, a dummy Phi node is returned. */
1870 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1873   /* There are 4 cases to treat.
1875      1. The block is not mature and we visit it the first time.  We can not
1876         create a proper Phi node, therefore a Phi0, i.e., a Phi without
1877         predecessors is returned.  This node is added to the linked list (field
1878         "link") of the containing block to be completed when this block is
1879         matured. (Completion will add a new Phi and turn the Phi0 into an Id
1882      2. The value is already known in this block, graph_arr[pos] is set and we
1883         visit the block the first time.  We can return the value without
1884         creating any new nodes.
1886      3. The block is mature and we visit it the first time.  A Phi node needs
1887         to be created (phi_merge).  If the Phi is not needed, as all its
1888         operands are the same value reaching the block through different
1889         paths, it's optimized away and the value itself is returned.
1891      4. The block is mature, and we visit it the second time.  Now two
1892         subcases are possible:
1893         * The value was computed completely the last time we were here.  This
1894           is the case if there is no loop.  We can return the proper value.
1895         * The recursion that visited this node and set the flag did not
1896           return yet.  We are computing a value in a loop and need to
1897           break the recursion.  This case only happens if we visited
1898       the same block with phi_merge before, which inserted a Phi0.
1899       So we return the Phi0.
1902   /* case 4 -- already visited. */
1903   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1904     /* As phi_merge allocates a Phi0 this value is always defined.  Here
1905      is the critical difference of the two algorithms. */
1906     assert(block->attr.block.graph_arr[pos]);
1907     return block->attr.block.graph_arr[pos];
1910   /* visited the first time */
1911   set_irn_visited(block, get_irg_visited(current_ir_graph));
1913   /* Get the local valid value */
1914   res = block->attr.block.graph_arr[pos];
1916   /* case 2 -- If the value is actually computed, return it. */
1917   if (res) { return res; };
1919   if (block->attr.block.matured) { /* case 3 */
1921     /* The Phi has the same amount of ins as the corresponding block. */
1922     int ins = get_irn_arity(block);
1924     NEW_ARR_A (ir_node *, nin, ins);
1926     /* Phi merge collects the predecessors and then creates a node. */
1927     res = phi_merge (block, pos, mode, nin, ins);
1929   } else {  /* case 1 */
1930     /* The block is not mature, we don't know how many in's are needed.  A Phi
1931        with zero predecessors is created.  Such a Phi node is called Phi0
1932        node.  The Phi0 is then added to the list of Phi0 nodes in this block
1933        to be matured by mature_immBlock later.
1934        The Phi0 has to remember the pos of its internal value.  If the real
1935        Phi is computed, pos is used to update the array with the local
1937     res = new_rd_Phi0 (current_ir_graph, block, mode);
1938     res->attr.phi0_pos = pos;
     /* Chain the Phi0 into the block's pending list (field link). */
1939     res->link = block->link;
1943   /* If we get here, the frontend missed a use-before-definition error */
1946     printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
1947     assert (mode->code >= irm_F && mode->code <= irm_P);
1948     res = new_rd_Const (NULL, current_ir_graph, block, mode,
1949                get_mode_null(mode));
1952   /* The local valid value is available now. */
1953   block->attr.block.graph_arr[pos] = res;
1958 #endif /* USE_FAST_PHI_CONSTRUCTION */
1960 /* ************************************************************************** */
1963  * Finalize a Block node, when all control flows are known.
1964  * Acceptable parameters are only Block nodes.
1967 mature_immBlock (ir_node *block)
1973   assert (get_irn_opcode(block) == iro_Block);
1974   /* @@@ should be commented in
1975      assert (!get_Block_matured(block) && "Block already matured"); */
1977   if (!get_Block_matured(block)) {
   /* block->in[0] is not a CFG predecessor, hence the -1. */
1978     ins = ARR_LEN (block->in)-1;
1979     /* Fix block parameters */
1980     block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1982     /* An array for building the Phi nodes. */
1983     NEW_ARR_A (ir_node *, nin, ins);
1985     /* Traverse a chain of Phi nodes attached to this block and mature
1987     for (n = block->link;  n;  n=next) {
       /* Fresh visited counter per Phi0 so get_r_value_internal's
          cycle detection starts clean. */
1988       inc_irg_visited(current_ir_graph);
       /* Replace the Phi0 placeholder by the real merged value. */
1990       exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1993     block->attr.block.matured = 1;
1995     /* Now, as the block is a finished firm node, we can optimize it.
1996        Since other nodes have been allocated since the block was created
1997        we can not free the node on the obstack.  Therefore we have to call
1999        Unfortunately the optimization does not change a lot, as all allocated
2000        nodes refer to the unoptimized node.
2001        We can call _2, as global cse has no effect on blocks. */
2002     block = optimize_in_place_2(block);
2003     IRN_VRFY_IRG(block, current_ir_graph);
/* --- Debug-info taking constructors that place the new node in the
   graph's current block (constants go to the start block). --- */
2008 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
2010   return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
/* Constants are created in the start block (they are block-invariant). */
2015 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
2017   return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
2022 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
2024   return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
2028 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2030   return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2036 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2038   return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2043 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2045   return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond: marks the Cond fragmentary and records the
   default projection number before building the X-mode Proj. */
2050 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2053   assert(arg->op == op_Cond);
2054   arg->attr.c.kind = fragmentary;
2055   arg->attr.c.default_proj = max_proj;
2056   res = new_Proj (arg, mode_X, max_proj);
2061 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2063   return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2068 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2070   return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2074 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2076   return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2081 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2083   return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2088 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2090   return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2096 new_d_Minus (dbg_info* db, ir_node *op,  ir_mode *mode)
2098   return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2103 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2105   return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2110  * allocate the frag array
/* Lazily attach a frag array to res while the graph is still being built.
   Skipped when the node was optimized away (opcode != op) or a CSE hit
   already carries an array (*frag_store set). */
2112 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2113   if (get_opt_precise_exc_context()) {
2114     if ((current_ir_graph->phase_state == phase_building) &&
2115     (get_irn_op(res) == op) && /* Could be optimized away. */
2116     !*frag_store)    /* Could be a cse where the arr is already set. */ {
2117       *frag_store = new_frag_arr(res);
/* --- Division-family constructors: fragile ops, pinned, and given a
   frag array when precise exception context is enabled. --- */
2124 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2127   res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2129   res->attr.except.pin_state = op_pin_state_pinned;
2130 #if PRECISE_EXC_CONTEXT
2131   allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
2138 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2141   res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2143   res->attr.except.pin_state = op_pin_state_pinned;
2144 #if PRECISE_EXC_CONTEXT
2145   allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
2152 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2155   res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2157   res->attr.except.pin_state = op_pin_state_pinned;
2158 #if PRECISE_EXC_CONTEXT
2159   allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
2166 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2169   res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2171   res->attr.except.pin_state = op_pin_state_pinned;
2172 #if PRECISE_EXC_CONTEXT
2173   allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
/* --- Bitwise, shift, comparison and control-flow constructors; all
   simple forwards to the new_rd_* variants in the current block. --- */
2180 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2182   return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2187 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2189   return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2194 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2196   return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2201 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2203   return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2208 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2210   return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2215 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2217   return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2222 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2224   return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2229 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2231   return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2236 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2238   return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2243 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2245   return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2250 new_d_Jmp (dbg_info* db)
2252   return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2256 new_d_IJmp (dbg_info* db, ir_node *tgt)
2258   return new_rd_IJmp (db, current_ir_graph, current_ir_graph->current_block, tgt);
2262 new_d_Cond (dbg_info* db, ir_node *c)
2264   return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* --- Memory-touching constructors.  Call/Load/Store/Alloc are fragile
   ops and get a frag array when precise exception context is on. --- */
2268 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2272   res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2273              store, callee, arity, in, tp);
2274 #if PRECISE_EXC_CONTEXT
2275   allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
2282 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2284   return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2289 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2291   return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2296 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2299   res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2301 #if PRECISE_EXC_CONTEXT
2302   allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
2309 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2312   res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2314 #if PRECISE_EXC_CONTEXT
2315   allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
2322 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2326   res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2327              store, size, alloc_type, where);
2328 #if PRECISE_EXC_CONTEXT
2329   allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
2336 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
2337     ir_node *size, type *free_type, where_alloc where)
2339   return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2340              store, ptr, size, free_type, where);
/* --- Selection, symbolic constants and miscellaneous constructors. --- */
/* simpleSel: entity selection without index list (0, NULL). */
2344 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2345 /* GL: objptr was called frame before.  Frame was a bad choice for the name
2346    as the operand could as well be a pointer to a dynamic object. */
2348   return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2349             store, objptr, 0, NULL, ent);
2353 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2355   return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2356             store, objptr, n_index, index, sel);
2360 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2362   return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2363                          store, objptr, ent));
/* SymConsts are placed in the start block, like other constants. */
2367 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2369   return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2374 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2376   return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2381 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2383   return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2390   return _new_d_Bad();
2394 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2396   return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2401 new_d_Unknown (ir_mode *m)
2403   return new_rd_Unknown(current_ir_graph, m);
2407 new_d_CallBegin (dbg_info *db, ir_node *call)
2410   res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2415 new_d_EndReg (dbg_info *db)
2418   res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2423 new_d_EndExcept (dbg_info *db)
2426   res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2431 new_d_Break (dbg_info *db)
2433   return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2437 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2439   return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2446   return _new_d_NoMem();
2450 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2451     ir_node *ir_true, ir_mode *mode) {
2452   return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2453       sel, ir_false, ir_true, mode);
2456 /* ********************************************************************* */
2457 /* Comfortable interface with automatic Phi node construction. */
2458 /* (Uses also constructors of the new_d_* interface, except new_Block.) */
2459 /* ********************************************************************* */
2461 /* Block construction */
2462 /* immature Block without predecessors */
2463 ir_node *new_d_immBlock (dbg_info* db) {
2466 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2467 /* creates a new dynamic in-array as length of in is -1 */
2468 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2469 current_ir_graph->current_block = res;
2470 res->attr.block.matured = 0;
2471 res->attr.block.dead = 0;
2472 /* res->attr.block.exc = exc_normal; */
2473 /* res->attr.block.handler_entry = 0; */
2474 res->attr.block.irg = current_ir_graph;
2475 res->attr.block.backedge = NULL;
2476 res->attr.block.in_cg = NULL;
2477 res->attr.block.cg_backedge = NULL;
2478 set_Block_block_visited(res, 0);
2480 /* Create and initialize array for Phi-node construction. */
2481 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2482 current_ir_graph->n_loc);
2483 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2485 /* Immature block may not be optimized! */
2486 IRN_VRFY_IRG(res, current_ir_graph);
2492 new_immBlock (void) {
2493 return new_d_immBlock(NULL);
2496 /* add an edge to a jmp/control flow node */
2498 add_immBlock_pred (ir_node *block, ir_node *jmp)
2500 if (block->attr.block.matured) {
2501 assert(0 && "Error: Block already matured!\n");
2504 assert(jmp != NULL);
2505 ARR_APP1(ir_node *, block->in, jmp);
2509 /* changing the current block */
2511 set_cur_block (ir_node *target) {
2512 current_ir_graph->current_block = target;
2515 /* ************************ */
2516 /* parameter administration */
2518 /* get a value from the parameter array from the current block by its index */
2520 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2522 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2523 inc_irg_visited(current_ir_graph);
2525 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2527 /* get a value from the parameter array from the current block by its index */
2529 get_value (int pos, ir_mode *mode)
2531 return get_d_value(NULL, pos, mode);
2534 /* set a value at position pos in the parameter array from the current block */
2536 set_value (int pos, ir_node *value)
2538 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2539 assert(pos+1 < current_ir_graph->n_loc);
2540 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2543 /* get the current store */
2547 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2548 /* GL: one could call get_value instead */
2549 inc_irg_visited(current_ir_graph);
2550 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2553 /* set the current store */
2555 set_store (ir_node *store)
2557 /* GL: one could call set_value instead */
2558 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2559 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2563 keep_alive (ir_node *ka) {
2564 add_End_keepalive(current_ir_graph->end, ka);
2567 /* --- Useful access routines --- */
2568 /* Returns the current block of the current graph. To set the current
2569 block use set_cur_block. */
2570 ir_node *get_cur_block(void) {
2571 return get_irg_current_block(current_ir_graph);
2574 /* Returns the frame type of the current graph */
2575 type *get_cur_frame_type(void) {
2576 return get_irg_frame_type(current_ir_graph);
2580 /* ********************************************************************* */
2583 /* call once for each run of the library */
2585 init_cons(uninitialized_local_variable_func_t *func)
2587 default_initialize_local_variable = func;
2590 /* call for each graph */
2592 irg_finalize_cons (ir_graph *irg) {
2593 irg->phase_state = phase_high;
2597 irp_finalize_cons (void) {
2598 int i, n_irgs = get_irp_n_irgs();
2599 for (i = 0; i < n_irgs; i++) {
2600 irg_finalize_cons(get_irp_irg(i));
2602 irp->phase_state = phase_high;\
2608 ir_node *new_Block(int arity, ir_node **in) {
2609 return new_d_Block(NULL, arity, in);
2611 ir_node *new_Start (void) {
2612 return new_d_Start(NULL);
2614 ir_node *new_End (void) {
2615 return new_d_End(NULL);
2617 ir_node *new_Jmp (void) {
2618 return new_d_Jmp(NULL);
2620 ir_node *new_IJmp (ir_node *tgt) {
2621 return new_d_IJmp(NULL, tgt);
2623 ir_node *new_Cond (ir_node *c) {
2624 return new_d_Cond(NULL, c);
2626 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2627 return new_d_Return(NULL, store, arity, in);
2629 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2630 return new_d_Raise(NULL, store, obj);
2632 ir_node *new_Const (ir_mode *mode, tarval *con) {
2633 return new_d_Const(NULL, mode, con);
2636 ir_node *new_Const_long(ir_mode *mode, long value)
2638 return new_d_Const_long(NULL, mode, value);
2641 ir_node *new_Const_type(tarval *con, type *tp) {
2642 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2645 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2646 return new_d_SymConst(NULL, value, kind);
2648 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2649 return new_d_simpleSel(NULL, store, objptr, ent);
2651 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2653 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2655 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2656 return new_d_InstOf (NULL, store, objptr, ent);
2658 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2660 return new_d_Call(NULL, store, callee, arity, in, tp);
2662 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2663 return new_d_Add(NULL, op1, op2, mode);
2665 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2666 return new_d_Sub(NULL, op1, op2, mode);
2668 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2669 return new_d_Minus(NULL, op, mode);
2671 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2672 return new_d_Mul(NULL, op1, op2, mode);
2674 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2675 return new_d_Quot(NULL, memop, op1, op2);
2677 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2678 return new_d_DivMod(NULL, memop, op1, op2);
2680 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2681 return new_d_Div(NULL, memop, op1, op2);
2683 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2684 return new_d_Mod(NULL, memop, op1, op2);
2686 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2687 return new_d_Abs(NULL, op, mode);
2689 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2690 return new_d_And(NULL, op1, op2, mode);
2692 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2693 return new_d_Or(NULL, op1, op2, mode);
2695 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2696 return new_d_Eor(NULL, op1, op2, mode);
2698 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2699 return new_d_Not(NULL, op, mode);
2701 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2702 return new_d_Shl(NULL, op, k, mode);
2704 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2705 return new_d_Shr(NULL, op, k, mode);
2707 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2708 return new_d_Shrs(NULL, op, k, mode);
2710 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2711 return new_d_Rot(NULL, op, k, mode);
2713 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2714 return new_d_Cmp(NULL, op1, op2);
2716 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2717 return new_d_Conv(NULL, op, mode);
2719 ir_node *new_Cast (ir_node *op, type *to_tp) {
2720 return new_d_Cast(NULL, op, to_tp);
2722 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2723 return new_d_Phi(NULL, arity, in, mode);
2725 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2726 return new_d_Load(NULL, store, addr, mode);
2728 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2729 return new_d_Store(NULL, store, addr, val);
2731 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2732 where_alloc where) {
2733 return new_d_Alloc(NULL, store, size, alloc_type, where);
2735 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2736 type *free_type, where_alloc where) {
2737 return new_d_Free(NULL, store, ptr, size, free_type, where);
2739 ir_node *new_Sync (int arity, ir_node **in) {
2740 return new_d_Sync(NULL, arity, in);
2742 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2743 return new_d_Proj(NULL, arg, mode, proj);
2745 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2746 return new_d_defaultProj(NULL, arg, max_proj);
2748 ir_node *new_Tuple (int arity, ir_node **in) {
2749 return new_d_Tuple(NULL, arity, in);
2751 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2752 return new_d_Id(NULL, val, mode);
2754 ir_node *new_Bad (void) {
2757 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2758 return new_d_Confirm (NULL, val, bound, cmp);
2760 ir_node *new_Unknown(ir_mode *m) {
2761 return new_d_Unknown(m);
2763 ir_node *new_CallBegin (ir_node *callee) {
2764 return new_d_CallBegin(NULL, callee);
2766 ir_node *new_EndReg (void) {
2767 return new_d_EndReg(NULL);
2769 ir_node *new_EndExcept (void) {
2770 return new_d_EndExcept(NULL);
2772 ir_node *new_Break (void) {
2773 return new_d_Break(NULL);
2775 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2776 return new_d_Filter(NULL, arg, mode, proj);
2778 ir_node *new_NoMem (void) {
2779 return new_d_NoMem();
2781 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2782 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);