 * File name:   ir/ir/ircons.c
 * Purpose:     Various irnode constructors. Automatic construction
 *              of SSA representation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier, Boris Boesler
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irprog_t.h"
# include "irgraph_t.h"
# include "irnode_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "firm_common_t.h"
# include "irbackedge_t.h"
# include "irflag_t.h"
# include "iredges_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
   Phi_in. Redefinition in irgraph.c!! */
typedef struct Phi_in_stack Phi_in_stack;
#endif
/* when we need verifying */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
#endif
/**
 * Language dependent variable initialization callback.
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
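/* A minimal sketch of such a callback (illustrative only, not from the
   original sources; the (irg, mode, pos) signature is the one used at the
   call site in phi_merge() below). A C-like front end might zero-initialize
   uninitialized locals; pos only identifies the variable, e.g. for warnings:

     static ir_node *my_init_local(ir_graph *irg, ir_mode *mode, int pos) {
       (void)pos;
       return new_r_Const(irg, get_irg_start_block(irg), mode,
                          get_mode_null(mode));
     }
*/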
/* --------------------------------------------- */
/* private interfaces, for professional use only */
/* --------------------------------------------- */
/* Constructs a Block with a fixed number of predecessors.
   Does not set current_block. Cannot be used with automatic
   Phi node construction. */
new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
  res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
  set_Block_matured(res, 1);
  set_Block_block_visited(res, 0);

  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.dead        = 0;
  res->attr.block.irg         = irg;
  res->attr.block.backedge    = new_backedge_arr(irg->obst, arity);
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  res->attr.block.extblk      = NULL;

  IRN_VRFY_IRG(res, irg);
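/* Usage sketch (illustrative, not from the original sources): given two
   already constructed control flow predecessors jmp1 and jmp2,

     ir_node *preds[2] = { jmp1, jmp2 };
     ir_node *blk      = new_rd_Block(NULL, irg, 2, preds);

   yields a mature block; the get_value/set_value machinery of the
   immature-block interface does not apply to it. */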
new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = irg; */

  IRN_VRFY_IRG(res, irg);

new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);

  IRN_VRFY_IRG(res, irg);
/* Creates a Phi node with all predecessors. Calling this constructor
   is only allowed if the corresponding block is mature. */
new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
  bool has_unknown = false;

  /* Don't assert that block matured: the use of this constructor is strongly
  if ( get_Block_matured(block) )
    assert( get_irn_arity(block) == arity );

  res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);

  res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {

  if (!has_unknown) res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(irg->end, res);
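/* Why the keep-alive is needed (illustrative): for source like

     while (1) { *p = 42; }

   the memory Phi in the loop header has no user outside the loop, is
   therefore unreachable from End, and would look dead although the loop's
   Stores must not be removed. */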
new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
  res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
  res->attr.con.tv = con;
  set_Const_type(res, tp);  /* Call method because of complex assertion. */
  res = optimize_node (res);
  assert(get_Const_type(res) == tp);
  IRN_VRFY_IRG(res, irg);

new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
  return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);

new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
  return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));

new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
  res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
         long proj)
  res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
  res->attr.proj = proj;

  assert(get_Proj_pred(res));
  assert(get_nodes_block(get_Proj_pred(res)));

  res = optimize_node(res);

  IRN_VRFY_IRG(res, irg);

new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
            long max_proj)
  assert(arg->op == op_Cond);
  arg->attr.c.kind         = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
  res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
  assert(is_atomic_type(to_tp));

  res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
  res->attr.cast.totype = to_tp;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
  res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_mode *mode)
  res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *memop, ir_node *op1, ir_node *op2)
  in[0] = memop;
  in[1] = op1;
  in[2] = op2;
  res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *memop, ir_node *op1, ir_node *op2)
  in[0] = memop;
  in[1] = op1;
  in[2] = op2;
  res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *memop, ir_node *op1, ir_node *op2)
  in[0] = memop;
  in[1] = op1;
  in[2] = op2;
  res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *memop, ir_node *op1, ir_node *op2)
  in[0] = memop;
  in[1] = op1;
  in[2] = op2;
  res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node(db, irg, block, op_And, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_mode *mode)
  res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_node *k, ir_mode *mode)
  in[0] = op;
  in[1] = k;
  res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_node *k, ir_mode *mode)
  in[0] = op;
  in[1] = k;
  res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_node *k, ir_mode *mode)
  in[0] = op;
  in[1] = k;
  res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_node *k, ir_mode *mode)
  in[0] = op;
  in[1] = k;
  res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op, ir_mode *mode)
  res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2)
  in[0] = op1;
  in[1] = op2;
  res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
  res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
  res = optimize_node (res);
  IRN_VRFY_IRG (res, irg);

new_rd_IJmp (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *tgt)
  res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
  res = optimize_node (res);
  IRN_VRFY_IRG (res, irg);

  if (get_irn_op(res) == op_IJmp) /* still an IJmp */

new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
  res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
  res->attr.c.kind         = dense;
  res->attr.c.default_proj = 0;
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
         ir_node *callee, int arity, ir_node **in, type *tp)
  NEW_ARR_A(ir_node *, r_in, r_arity);
  r_in[0] = store;
  r_in[1] = callee;
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

  res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);

  assert((get_unknown_type() == tp) || is_Method_type(tp));
  set_Call_type(res, tp);
  res->attr.call.exc.pin_state = op_pin_state_pinned;
  res->attr.call.callee_arr    = NULL;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
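/* Predecessor layout sketch (illustrative): for a two-argument call f(a, b)
   the array built above is

     r_in = { store, callee, a, b },   with r_arity = 2 + arity,

   i.e. a Call's predecessors are always [memory, callee pointer, args...]. */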
new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
           ir_node *store, int arity, ir_node **in)
  NEW_ARR_A (ir_node *, r_in, r_arity);
  r_in[0] = store;
  memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
  res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
  in[0] = store;
  in[1] = obj;
  res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
         ir_node *store, ir_node *adr, ir_mode *mode)
  in[0] = store;
  in[1] = adr;
  res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
  res->attr.load.exc.pin_state = op_pin_state_pinned;
  res->attr.load.load_mode     = mode;
  res->attr.load.volatility    = volatility_non_volatile;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
          ir_node *store, ir_node *adr, ir_node *val)
  in[0] = store;
  in[1] = adr;
  in[2] = val;
  res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
  res->attr.store.exc.pin_state = op_pin_state_pinned;
  res->attr.store.volatility    = volatility_non_volatile;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
          ir_node *size, type *alloc_type, where_alloc where)
  in[0] = store;
  in[1] = size;
  res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
  res->attr.a.exc.pin_state = op_pin_state_pinned;
  res->attr.a.where         = where;
  res->attr.a.type          = alloc_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
         ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
  in[0] = store;
  in[1] = ptr;
  in[2] = size;
  res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
  res->attr.f.where = where;
  res->attr.f.type  = free_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
        int arity, ir_node **in, entity *ent)
  assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");

  NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
  r_in[0] = store;
  r_in[1] = objptr;
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

  /*
   * FIXME: Sels can select functions which should be of mode mode_P_code.
   */
  res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
  res->attr.s.ent = ent;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
           ir_node *objptr, type *ent)
  NEW_ARR_A(ir_node *, r_in, r_arity);
  r_in[0] = store;
  r_in[1] = objptr;

  res = new_ir_node(db, irg, block, op_InstOf, mode_T, r_arity, r_in);
  res->attr.io.ent = ent;

  /* res = optimize(res); */
  IRN_VRFY_IRG(res, irg);
new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
              symconst_kind symkind, type *tp) {
  if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
    mode = mode_P_data;   /* FIXME: can be mode_P_code */

  res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);

  res->attr.i.num = symkind;
  res->attr.i.sym = value;

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
         symconst_kind symkind)
  ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);

ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
  symconst_symbol sym = {(type *)symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
}

ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
  symconst_symbol sym = {(type *)symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
}

ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
}

ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
}

new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
  res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_rd_Bad (ir_graph *irg)

new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
  ir_node *in[2], *res;

  in[0] = val;
  in[1] = bound;
  res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
  res->attr.confirm_cmp = cmp;
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

/* this function is often called with current_ir_graph unset */
new_rd_Unknown (ir_graph *irg, ir_mode *m)
  ir_graph *rem = current_ir_graph;

  current_ir_graph = irg;
  res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
  res = optimize_node(res);
  current_ir_graph = rem;
new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
  in[0] = get_Call_ptr(call);
  res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
  /* res->attr.callbegin.irg = irg; */
  res->attr.callbegin.call = call;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);

  IRN_VRFY_IRG(res, irg);

new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
  irg->end_except = res;
  IRN_VRFY_IRG (res, irg);

new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
           long proj)
  res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
  res->attr.filter.proj     = proj;
  res->attr.filter.in_cg    = NULL;
  res->attr.filter.backedge = NULL;

  assert(get_Proj_pred(res));
  assert(get_nodes_block(get_Proj_pred(res)));

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_rd_NoMem (ir_graph *irg) {

new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
    ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
  in[0] = sel;
  in[1] = ir_false;
  in[2] = ir_true;
  res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);

  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
  return new_rd_Block(NULL, irg, arity, in);
}
ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
  return new_rd_Start(NULL, irg, block);
}
ir_node *new_r_End (ir_graph *irg, ir_node *block) {
  return new_rd_End(NULL, irg, block);
}
ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
  return new_rd_Jmp(NULL, irg, block);
}
ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
  return new_rd_IJmp(NULL, irg, block, tgt);
}
ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
  return new_rd_Cond(NULL, irg, block, c);
}
ir_node *new_r_Return (ir_graph *irg, ir_node *block,
               ir_node *store, int arity, ir_node **in) {
  return new_rd_Return(NULL, irg, block, store, arity, in);
}
ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
              ir_node *store, ir_node *obj) {
  return new_rd_Raise(NULL, irg, block, store, obj);
}
ir_node *new_r_Const (ir_graph *irg, ir_node *block,
              ir_mode *mode, tarval *con) {
  return new_rd_Const(NULL, irg, block, mode, con);
}
ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
              ir_mode *mode, long value) {
  return new_rd_Const_long(NULL, irg, block, mode, value);
}
ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
              ir_mode *mode, tarval *con, type *tp) {
  return new_rd_Const_type(NULL, irg, block, mode, con, tp);
}
ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
             symconst_symbol value, symconst_kind symkind) {
  return new_rd_SymConst(NULL, irg, block, value, symkind);
}
ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
            ir_node *objptr, int n_index, ir_node **index,
            entity *ent) {
  return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
}
ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
               type *ent) {
  return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
}
ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *callee, int arity, ir_node **in,
             type *tp) {
  return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
}
ir_node *new_r_Add (ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Add(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Sub(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
              ir_node *op, ir_mode *mode) {
  return new_rd_Minus(NULL, irg, block, op, mode);
}
ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Mul(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
             ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Quot(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
               ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_Div (ir_graph *irg, ir_node *block,
            ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Div(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
            ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Mod(NULL, irg, block, memop, op1, op2);
}
ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
            ir_node *op, ir_mode *mode) {
  return new_rd_Abs(NULL, irg, block, op, mode);
}
ir_node *new_r_And (ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Or (ir_graph *irg, ir_node *block,
           ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor(NULL, irg, block, op1, op2, mode);
}
ir_node *new_r_Not (ir_graph *irg, ir_node *block,
            ir_node *op, ir_mode *mode) {
  return new_rd_Not(NULL, irg, block, op, mode);
}
ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2) {
  return new_rd_Cmp(NULL, irg, block, op1, op2);
}
ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
            ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
            ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
             ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
            ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
             ir_node *op, ir_mode *mode) {
  return new_rd_Conv(NULL, irg, block, op, mode);
}
ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
  return new_rd_Cast(NULL, irg, block, op, to_tp);
}
ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
            ir_node **in, ir_mode *mode) {
  return new_rd_Phi(NULL, irg, block, arity, in, mode);
}
ir_node *new_r_Load (ir_graph *irg, ir_node *block,
             ir_node *store, ir_node *adr, ir_mode *mode) {
  return new_rd_Load(NULL, irg, block, store, adr, mode);
}
ir_node *new_r_Store (ir_graph *irg, ir_node *block,
              ir_node *store, ir_node *adr, ir_node *val) {
  return new_rd_Store(NULL, irg, block, store, adr, val);
}
ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
              ir_node *size, type *alloc_type, where_alloc where) {
  return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
}
ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
  return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
}
ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
  return new_rd_Sync(NULL, irg, block, arity, in);
}
ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
             ir_mode *mode, long proj) {
  return new_rd_Proj(NULL, irg, block, arg, mode, proj);
}
ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
                long max_proj) {
  return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
}
ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
              int arity, ir_node **in) {
  return new_rd_Tuple(NULL, irg, block, arity, in);
}
ir_node *new_r_Id (ir_graph *irg, ir_node *block,
           ir_node *val, ir_mode *mode) {
  return new_rd_Id(NULL, irg, block, val, mode);
}
ir_node *new_r_Bad (ir_graph *irg) {
  return new_rd_Bad(irg);
}
ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
}
ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
  return new_rd_Unknown(irg, m);
}
ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
  return new_rd_CallBegin(NULL, irg, block, callee);
}
ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
  return new_rd_EndReg(NULL, irg, block);
}
ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
  return new_rd_EndExcept(NULL, irg, block);
}
ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
  return new_rd_Break(NULL, irg, block);
}
ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
               ir_mode *mode, long proj) {
  return new_rd_Filter(NULL, irg, block, arg, mode, proj);
}
ir_node *new_r_NoMem (ir_graph *irg) {
  return new_rd_NoMem(irg);
}
ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
            ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
}
/** ********************/
/** public interfaces  */
/** construction tools */

/**
 * - create a new Start node in the current block
 *
 * @return s - pointer to the created Start node
 */
new_d_Start (dbg_info* db)
  res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
             op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = current_ir_graph; */

  res = optimize_node(res);
  IRN_VRFY_IRG(res, current_ir_graph);
new_d_End (dbg_info* db)
  res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
            op_End, mode_X, -1, NULL);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, current_ir_graph);

/* Constructs a Block with a fixed number of predecessors.
   Does set current_block. Can be used with automatic Phi
   node construction. */
new_d_Block (dbg_info* db, int arity, ir_node **in)
  bool has_unknown = false;

  res = new_rd_Block(db, current_ir_graph, arity, in);

  /* Create and initialize array for Phi-node construction. */
  if (get_irg_phase_state(current_ir_graph) == phase_building) {
    res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
                          current_ir_graph->n_loc);
    memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {

  if (!has_unknown) res = optimize_node(res);
  current_ir_graph->current_block = res;

  IRN_VRFY_IRG(res, current_ir_graph);
/* ***********************************************************************/
/* Methods necessary for automatic Phi node creation                     */
/*
  ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
  ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
  ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

  Call Graph:   ( A ---> B == A "calls" B)

       get_value           mature_immBlock
           |                     |
           v                     v
       get_r_value_internal <--> phi_merge
           |                     |
           v                     v
       new_rd_Phi0           new_rd_Phi_in

* *************************************************************************** */
/** Creates a Phi node with 0 predecessors */
static INLINE ir_node *
new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
  res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
  IRN_VRFY_IRG(res, irg);
/* There are two implementations of the Phi node construction. The first
   is faster, but does not work for blocks with more than 2 predecessors.
   The second always works but is slower and causes more unnecessary Phi
   nodes.
   Select the implementations by the following preprocessor flag set in

#if USE_FAST_PHI_CONSTRUCTION
/* This is a stack used for allocating and deallocating nodes in
   new_rd_Phi_in. The original implementation used the obstack
   to model this stack, now it is explicit. This reduces side effects.
*/
#if USE_EXPLICIT_PHI_IN_STACK

new_Phi_in_stack(void) {
  res = (Phi_in_stack *) malloc(sizeof(Phi_in_stack));

  res->stack = NEW_ARR_F (ir_node *, 0);

free_Phi_in_stack(Phi_in_stack *s) {
  DEL_ARR_F(s->stack);

free_to_Phi_in_stack(ir_node *phi) {
  if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
      current_ir_graph->Phi_in_stack->pos)
    ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
  else
    current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;

  (current_ir_graph->Phi_in_stack->pos)++;
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
                   int arity, ir_node **in) {
  ir_node **stack = current_ir_graph->Phi_in_stack->stack;
  int       pos   = current_ir_graph->Phi_in_stack->pos;

    /* We need to allocate a new node */
    res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
    res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

    /* reuse the old node and initialize it again. */

    assert (res->kind == k_ir_node);
    assert (res->op == op_Phi);

    assert (arity >= 0);
    /* How to free the old in array? Not at all: it is on the obstack. */
    res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));

    memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

    (current_ir_graph->Phi_in_stack->pos)--;

#endif /* USE_EXPLICIT_PHI_IN_STACK */
/* Creates a Phi node with a given, fixed array **in of predecessors.
   If the Phi node is unnecessary, as the same value reaches the block
   through all control flow paths, it is eliminated and the value
   returned directly. This constructor is only intended for use in
   the automatic Phi node generation triggered by get_value or mature.
   The implementation is quite tricky and depends on the fact that
   the nodes are allocated on a stack:
   The in array contains predecessors and NULLs. The NULLs appear,
   if get_r_value_internal, which computed the predecessors, reached
   the same block on two paths. In this case the same value reaches
   this block on both paths, and there is no definition in between. We need
   not allocate a Phi where these paths merge, but we have to communicate
   this fact to the caller. This happens by returning a pointer to the
   node the caller _will_ allocate. (Yes, we predict the address. We can
   do so because the nodes are allocated on the obstack.) The caller then
   finds a pointer to itself and, when this routine is called again,
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
  ir_node *res, *known;

  /* Allocate a new node on the obstack. This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node! If the in array contains NULLs, there will be
     missing predecessors in the returned node. Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
#else
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
#endif

  /* The in-array can contain NULLs. These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path. The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)

  /* i==ins: there is at most one predecessor, we don't need a phi node. */

#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
#else
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);
#endif

  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

  /* return the pointer to the Phi node. This node might be deallocated! */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

   allocates and returns this node. The routine called to allocate the
   node might optimize it away and return a real value, or even a pointer
   to a deallocated Phi node on top of the obstack!
   This function is called with an in-array of proper size. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created. This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned. We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables. Attention: this might be a pointer to
     a node that will be allocated later!!! See new_rd_Phi_in.
     If this is called in mature, after some set_value in the same block,
     the proper value must not be overwritten:
       get_value        (makes Phi0, puts it into graph_arr)
       set_value        (overwrites Phi0 in graph_arr)
       mature_immBlock  (upgrades Phi0, puts it again into graph_arr, overwriting

  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
    /* printf(" value already computed by %s\n",
       get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
/* This function returns the last definition of a variable. In case
   this variable was last defined in a previous block, Phi nodes are
   inserted. If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time. We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned. This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time. We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time. A Phi node needs
        to be created (phi_merge). If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time. Now two
        subcases are possible:
        * The value was computed completely the last time we were here. This
          is the case if there is no loop. We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet. We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case. Straightforwardly we would create a Phi before
          starting the computation of its predecessors. In this case we will
          find a Phi here in any case. The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi. @@@
        There is no simple check for the second subcase. Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyway, the basic situation is the same: we reached a block
        on two paths without finding a definition of the value: No Phi
        nodes are needed on both paths.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example.
  */
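/* Worked example (illustrative, not from the original sources): for

     if (c) x = 1; else x = 2;
     ... use x ...

   the join block is already mature when the use asks for x, so case 3
   applies and phi_merge yields Phi(1, 2). For

     do { x = x + 1; } while (c);

   the still immature loop header is reached a second time along the back
   edge, so the construction must break the recursion as described in
   cases 1 and 4 above. */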
  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many ins are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock

       The Phi0 has to remember the pos of its internal value. If the real
       Phi is computed, pos is used to update the array with the local

    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */

  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
              tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
   it starts the recursion. This causes an Id at the entry of
   every block that has no definition of the value! **/

#if USE_EXPLICIT_PHI_IN_STACK
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) { }
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
           ir_node **in, int ins, ir_node *phi0)
  ir_node *res, *known;

  /* Allocate a new node on the obstack. The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the
     Phi node merges the same definition on several paths and therefore
     is not needed. Don't consider Bad nodes! */
  for (i = 0; i < ins; ++i)

    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis: We can't detect them yet properly, as
       they still refer to the Phi0 they will replace. So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i] == res || in[i] == known || is_Bad(in[i])) continue;

  /* i==ins: there is at most one predecessor, we don't need a phi node. */

    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi node may become superfluous. */
      res = optimize_in_place_2(known);

      exchange(known, res);

      /* An undefined value, e.g., in unreachable code. */

  res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
  IRN_VRFY_IRG(res, irg);

  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(irg->end, res);
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

#if PRECISE_EXC_CONTEXT
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Construct a new frag_arr for node n.
   Copy the content from the current graph_arr of the corresponding block:
   this is the current state.
   Set ProjM(n) as current memory state.
   Furthermore, the last entry in the frag_arr of the current block points to n. This
   constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
*/
static INLINE ir_node ** new_frag_arr (ir_node *n)
  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
     sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet. */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);

    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  );
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
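/* Chain sketch (illustrative): after constructing, in this order,

     Call c;  Load l;

   in one block, graph_arr[n_loc-1] points to l, and l's frag_arr[n_loc-1]
   still points to c, giving the chain block -> l -> c that set_frag_value()
   walks below. */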
/**
 * returns the frag_arr from a node
 */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
  switch (get_irn_opcode(n)) {
  case iro_Call:
    return n->attr.call.exc.frag_arr;
  case iro_Alloc:
    return n->attr.a.exc.frag_arr;
  case iro_Load:
    return n->attr.load.exc.frag_arr;
  case iro_Store:
    return n->attr.store.exc.frag_arr;
  default:
    return n->attr.except.frag_arr;

set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);
  }

  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;

    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);

  assert(0 && "potential endless recursion");
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];

    if (block->attr.block.graph_arr[pos]) {
      /* There was a set_value after the cfOp and no get_value before that
         set_value. We must build a Phi node now. */
      if (block->attr.block.matured) {
        int ins = get_irn_arity(block);

        NEW_ARR_A (ir_node *, nin, ins);
        res = phi_merge(block, pos, mode, nin, ins);

        res = new_rd_Phi0 (current_ir_graph, block, mode);
        res->attr.phi0_pos = pos;
        res->link = block->link;

      /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
         but this should be better: (remove comment if this works) */
      /* It's a Phi, we can write this into all graph_arrs with NULL */
      set_frag_value(block->attr.block.graph_arr, pos, res);

      res = get_r_value_internal(block, pos, mode);
      set_frag_value(block->attr.block.graph_arr, pos, res);
   computes the predecessors for the real phi node, and then
   allocates and returns this node. The routine called to allocate the
   node might optimize it away and return a real value.
   This function must be called with an in-array of proper size. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Otherwise we must not set graph_arr, as a later value is remembered there. */
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is not a good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language. Unfortunately, the only help we can give
         here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp). In this case we call the function needlessly, possibly
         generating a spurious error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted

      if (default_initialize_local_variable)
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
      else
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
    }
    phi0 = new_rd_Phi0(current_ir_graph, block, mode);
    block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
    if (get_opt_precise_exc_context()) {
      /* Set graph_arr for fragile ops. Also here we should break recursion.
         We could choose a cyclic path through a cfop. But the recursion would
         break at some point. */
      set_frag_value(block->attr.block.graph_arr, pos, phi0);
  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);

    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();

    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);

        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

      nin[i-1] = new_Bad();

  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal. In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */

  phi0_all = block->attr.block.graph_arr[pos];
  if (!((get_irn_op(phi0_all) == op_Phi) &&
        (get_irn_arity(phi0_all) == 0)   &&
        (get_nodes_block(phi0_all) == block)))

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created. This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
    exchange(phi0, res);
    block->attr.block.graph_arr[pos] = res;
    /* Don't set_frag_value as it does not overwrite. This doesn't matter, as
       it is only an optimization. */
/* This function returns the last definition of a variable. In case
   this variable was last defined in a previous block, Phi nodes are
   inserted. If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time. We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned. This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time. We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time. A Phi node needs
        to be created (phi_merge). If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time. Now two
        subcases are possible:
        * The value was computed completely the last time we were here. This
          is the case if there is no loop. We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet. We are computing a value in a loop and need to
          break the recursion. This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0.
  */
  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined. Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many ins are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of its internal value. If the real
       Phi is computed, pos is used to update the array with the local

    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */

  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
              get_mode_null(mode));

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
#endif /* USE_FAST_PHI_CONSTRUCTION */

/* ************************************************************************** */
/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 */
mature_immBlock (ir_node *block)
  assert (get_irn_opcode(block) == iro_Block);
  /* @@@ should be commented in
     assert (!get_Block_matured(block) && "Block already matured"); */

  if (!get_Block_matured(block)) {
    ins = ARR_LEN (block->in)-1;
    /* Fix block parameters */
    block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

    /* An array for building the Phi nodes. */
    NEW_ARR_A (ir_node *, nin, ins);

    /* Traverse a chain of Phi nodes attached to this block and mature
    for (n = block->link; n; n = next) {
      inc_irg_visited(current_ir_graph);

      exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));

    block->attr.block.matured = 1;

    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack. Therefore we have to call
       optimize_in_place().
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call _2, as global cse has no effect on blocks. */
    block = optimize_in_place_2(block);
    IRN_VRFY_IRG(block, current_ir_graph);
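/* Typical front end sequence (illustrative; new_immBlock and
   add_immBlock_pred are the public immature-block constructors declared
   in ircons.h):

     ir_node *b = new_immBlock();       block with yet unknown arity
     add_immBlock_pred(b, jmp_then);
     add_immBlock_pred(b, jmp_else);
     mature_immBlock(b);                fixes arity, completes the Phi0s
*/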
new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
  return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
            arity, in, mode);

new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
  return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
              mode, con);

new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
  return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);

new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
  return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
               mode, con, tp);

new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
  return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
           val, mode);

new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
  return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
             arg, mode, proj);

new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
  assert(arg->op == op_Cond);
  arg->attr.c.kind = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_Proj (arg, mode_X, max_proj);

new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
  return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
             op, mode);

new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
  return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);

new_d_Tuple (dbg_info* db, int arity, ir_node **in)
  return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
              arity, in);

new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
            op1, op2, mode);

new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
            op1, op2, mode);

new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
  return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
              op, mode);

new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
            op1, op2, mode);
/**
 * allocate the frag array
 */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
  if (get_opt_precise_exc_context()) {
    if ((current_ir_graph->phase_state == phase_building) &&
        (get_irn_op(res) == op)  &&  /* Could be optimized away. */
        !*frag_store)                /* Could be a cse where the arr is already set. */ {
      *frag_store = new_frag_arr(res);
    }
  }
}
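
/* Note (editorial, descriptive only): with precise exception context
   enabled, the frag array records a copy of the per-block value array at
   each fragile (potentially excepting) operation.  When the exception
   handler block is built later, Phi construction can thus still find the
   variable and memory state that was valid at the operation itself. */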
ir_node *
new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
                     memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
                       memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
                    memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
                    memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
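
/* Illustrative sketch (not part of this file's interface): the division
   constructors yield a mode_T node whose results must be unpacked with
   Proj nodes.  The projection numbers pn_Div_M, pn_Div_res and
   pn_Div_X_except are assumed as declared in the public node headers. */
static ir_node *build_checked_div_sketch(ir_node *a, ir_node *b) {
  ir_node *div, *res;

  div = new_Div(get_store(), a, b);            /* consumes and produces memory */
  set_store(new_Proj(div, mode_M, pn_Div_M));  /* thread the memory result */
  res = new_Proj(div, mode_Is, pn_Div_res);    /* the quotient itself */
  /* new_Proj(div, mode_X, pn_Div_X_except) would yield the exception
     control flow, to be added as predecessor of a handler block. */
  return res;
}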
ir_node *
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
                     op1, op2, mode);
}

ir_node *
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
                    op1, op2, mode);
}

ir_node *
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
                     op1, op2, mode);
}

ir_node *
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
                     op, mode);
}

ir_node *
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
                      op, k, mode);
}

ir_node *
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
                     op, k, mode);
}

ir_node *
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode) {
  return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
                     op, mode);
}

ir_node *
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2) {
  return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
                     op1, op2);
}
ir_node *
new_d_Jmp (dbg_info* db) {
  return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *
new_d_IJmp (dbg_info* db, ir_node *tgt) {
  return new_rd_IJmp (db, current_ir_graph, current_ir_graph->current_block, tgt);
}

ir_node *
new_d_Cond (dbg_info* db, ir_node *c) {
  return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
}
ir_node *
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
            type *tp) {
  ir_node *res;
  res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
                     store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
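
/* Illustrative sketch (not part of this file's interface): a Call takes the
   current store, a callee address (here a SymConst on the callee entity)
   and the argument array; its results are unpacked with two Proj levels.
   The names symconst_addr_ent, pn_Call_M_regular and pn_Call_T_result are
   assumed as declared in the public headers, as is get_entity_type(). */
static ir_node *build_call_sketch(entity *callee_ent, ir_node *arg0) {
  symconst_symbol sym;
  ir_node *callee, *call, *res_tuple;
  ir_node *in[1];

  sym.entity_p = callee_ent;
  callee = new_SymConst(sym, symconst_addr_ent);

  in[0] = arg0;
  call  = new_Call(get_store(), callee, 1, in, get_entity_type(callee_ent));

  set_store(new_Proj(call, mode_M, pn_Call_M_regular));
  res_tuple = new_Proj(call, mode_T, pn_Call_T_result);
  return new_Proj(res_tuple, mode_Is, 0);   /* first (here: only) result */
}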
ir_node *
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in) {
  return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
                        store, arity, in);
}

ir_node *
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj) {
  return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
                       store, obj);
}

ir_node *
new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode) {
  ir_node *res;
  res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
                     store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) {
  ir_node *res;
  res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
                      store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
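
/* Illustrative sketch (not part of this file's interface): Load and Store
   must be threaded through the memory state.  pn_Load_M, pn_Load_res and
   pn_Store_M are assumed as declared in the public headers, as is
   new_tarval_from_long() from the tarval module. */
static void build_increment_sketch(ir_node *addr) {
  ir_node *load, *val, *inc, *store;

  load = new_Load(get_store(), addr, mode_Is);
  set_store(new_Proj(load, mode_M, pn_Load_M));
  val = new_Proj(load, mode_Is, pn_Load_res);

  inc   = new_Add(val, new_Const(mode_Is, new_tarval_from_long(1, mode_Is)), mode_Is);
  store = new_Store(get_store(), addr, inc);
  set_store(new_Proj(store, mode_M, pn_Store_M));
}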
ir_node *
new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
             where_alloc where) {
  ir_node *res;
  res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
                      store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
            ir_node *size, type *free_type, where_alloc where) {
  return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
                      store, ptr, size, free_type, where);
}

ir_node *
new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
                     store, objptr, 0, NULL, ent);
}
ir_node *
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel) {
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
                     store, objptr, n_index, index, sel);
}
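
/* Illustrative sketch (not part of this file's interface): Sel computes the
   address of an entity selected from a compound object.  For an array
   element one index node is passed; for a plain struct member new_simpleSel
   suffices. */
static ir_node *build_array_access_sketch(ir_node *arr_ptr, ir_node *idx,
                                          entity *element_ent) {
  ir_node *in[1];
  in[0] = idx;
  /* address of arr_ptr[idx]; a subsequent Load would read the element */
  return new_Sel(get_store(), arr_ptr, 1, in, element_ent);
}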
ir_node *
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent) {
  return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
                         store, objptr, ent));
}

ir_node *
new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp) {
  return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
                               value, kind, tp);
}

ir_node *
new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind) {
  return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
                          value, kind);
}

ir_node *
new_d_Sync (dbg_info* db, int arity, ir_node** in) {
  return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
                      arity, in);
}

ir_node *
new_d_Bad (void) {
  return _new_d_Bad();
}
ir_node *
new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
                         val, bound, cmp);
}

ir_node *
new_d_Unknown (ir_mode *m) {
  return new_rd_Unknown(current_ir_graph, m);
}

ir_node *
new_d_CallBegin (dbg_info *db, ir_node *call) {
  ir_node *res;
  res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
  return res;
}

ir_node *
new_d_EndReg (dbg_info *db) {
  ir_node *res;
  res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *
new_d_EndExcept (dbg_info *db) {
  ir_node *res;
  res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
  return res;
}

ir_node *
new_d_Break (dbg_info *db) {
  return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
}

ir_node *
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
                        arg, mode, proj);
}

ir_node *
new_d_NoMem (void) {
  return _new_d_NoMem();
}

ir_node *
new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
           ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
                     sel, ir_false, ir_true, mode);
}
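
/* Illustrative sketch (not part of this file's interface): Mux selects one
   of two data values without control flow, e.g. for a branch-free maximum.
   pn_Cmp_Gt is assumed as declared in the public headers. */
static ir_node *build_max_sketch(ir_node *a, ir_node *b) {
  ir_node *cmp = new_Cmp(a, b);
  ir_node *sel = new_Proj(cmp, mode_b, pn_Cmp_Gt);  /* true iff a > b */
  return new_Mux(sel, b, a, mode_Is);               /* false -> b, true -> a */
}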
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Uses also constructors of ?? interface, except new_Block.)           */
/* ********************************************************************* */
/* Block construction */
/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
  ir_node *res;

  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  res->attr.block.dead        = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                         current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}
ir_node *
new_immBlock (void) {
  return new_d_immBlock(NULL);
}

/* add an edge to a jmp/control flow node */
void
add_immBlock_pred (ir_node *block, ir_node *jmp)
{
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}

/* changing the current block */
void
set_cur_block (ir_node *target) {
  current_ir_graph->current_block = target;
}
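
/* Illustrative sketch (not part of this file's interface): an if/else
   diamond built from immature blocks.  pn_Cond_true and pn_Cond_false are
   assumed as declared in the public headers; cond_value is a mode_b node. */
static void build_diamond_sketch(ir_node *cond_value) {
  ir_node *cond, *proj_t, *proj_f, *jmp_t, *jmp_f, *then_bl, *else_bl, *join;

  cond   = new_Cond(cond_value);
  proj_t = new_Proj(cond, mode_X, pn_Cond_true);
  proj_f = new_Proj(cond, mode_X, pn_Cond_false);

  then_bl = new_immBlock();            /* becomes the current block */
  add_immBlock_pred(then_bl, proj_t);
  mature_immBlock(then_bl);            /* single predecessor: mature now */
  /* ... then-side code, e.g. set_value(...) ... */
  jmp_t = new_Jmp();

  else_bl = new_immBlock();
  add_immBlock_pred(else_bl, proj_f);
  mature_immBlock(else_bl);
  /* ... else-side code ... */
  jmp_f = new_Jmp();

  join = new_immBlock();               /* Phi nodes for values diverging on  */
  add_immBlock_pred(join, jmp_t);      /* the two sides are inserted         */
  add_immBlock_pred(join, jmp_f);      /* automatically by get_value() once  */
  mature_immBlock(join);               /* the block is mature                */
}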
/* ************************ */
/* parameter administration */

/* get a value from the parameter array from the current block by its index */
ir_node *
get_d_value (dbg_info* db, int pos, ir_mode *mode)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);

  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}

/* get a value from the parameter array from the current block by its index */
ir_node *
get_value (int pos, ir_mode *mode)
{
  return get_d_value(NULL, pos, mode);
}

/* set a value at position pos in the parameter array from the current block */
void
set_value (int pos, ir_node *value)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}

/* get the current store */
ir_node *
get_store (void)
{
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
}
/* set the current store */
void
set_store (ir_node *store)
{
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
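
/* Illustrative sketch (not part of this file's interface): local variables
   are addressed by position in the per-block value array; the construction
   inserts Phi nodes on demand.  Slot allocation (here 0 and 1) is up to the
   front end and must stay below the n_loc the graph was created with. */
static void build_assignment_sketch(void) {
  /* source:  x = x + y;   with x in slot 0 and y in slot 1 */
  ir_node *x = get_value(0, mode_Is);
  ir_node *y = get_value(1, mode_Is);
  set_value(0, new_Add(x, y, mode_Is));
}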
void
keep_alive (ir_node *ka) {
  add_End_keepalive(current_ir_graph->end, ka);
}

/* --- Useful access routines --- */
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block(void) {
  return get_irg_current_block(current_ir_graph);
}

/* Returns the frame type of the current graph */
type *get_cur_frame_type(void) {
  return get_irg_frame_type(current_ir_graph);
}
/* ********************************************************************* */
/* initialize and finalize the ir construction                           */

/* call once for each run of the library */
void
init_cons(uninitialized_local_variable_func_t *func)
{
  default_initialize_local_variable = func;
}

/* call for each graph */
void
irg_finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
}

void
irp_finalize_cons (void) {
  int i, n_irgs = get_irp_n_irgs();
  for (i = 0; i < n_irgs; i++) {
    irg_finalize_cons(get_irp_irg(i));
  }
  irp->phase_state = phase_high;
}
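
/* Illustrative sketch (not part of this file's interface): the intended
   construction lifecycle for one graph.  new_ir_graph() from the irgraph
   module (assumed here) sets up start/end blocks and a value array of
   n_loc entries; get_irg_end_block() and get_irg_current_block() are
   assumed from the same module. */
static void build_graph_sketch(entity *method_ent) {
  ir_graph *irg;
  ir_node  *ret;

  irg = new_ir_graph(method_ent, 2 /* n_loc: two local values */);

  /* ... build code in the current block with the constructors above ... */

  ret = new_Return(get_store(), 0, NULL);       /* a void method */
  mature_immBlock(get_irg_current_block(irg));

  add_immBlock_pred(get_irg_end_block(irg), ret);
  mature_immBlock(get_irg_end_block(irg));

  irg_finalize_cons(irg);   /* graph complete: switch to phase_high */
}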
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_IJmp (ir_node *tgt) {
  return new_d_IJmp(NULL, tgt);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_Const_long(ir_mode *mode, long value)
{
  return new_d_Const_long(NULL, mode, value);
}
ir_node *new_Const_type(tarval *con, type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem (void) {
  return new_d_NoMem();
}
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
}