3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler, Michael Beck
10 * Copyright: (c) 1998-2007 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
19 #include "irgraph_t.h"
23 #include "firm_common_t.h"
30 #include "irbackedge_t.h"
32 #include "iredges_t.h"
36 #if USE_EXPLICIT_PHI_IN_STACK
37 /* A stack needed for the automatic Phi node construction in constructor
38 Phi_in. Redefinition in irgraph.c!! */
43 typedef struct Phi_in_stack Phi_in_stack;
46 /* when we need verifying */
48 # define IRN_VRFY_IRG(res, irg)
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
58 /* creates a bd constructor for a binop */
59 #define NEW_BD_BINOP(instr) \
61 new_bd_##instr(dbg_info *db, ir_node *block, \
62 ir_node *op1, ir_node *op2, ir_mode *mode) \
66 ir_graph *irg = current_ir_graph; \
69 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
70 res = optimize_node(res); \
71 IRN_VRFY_IRG(res, irg); \
75 /* creates a bd constructor for an unop */
76 #define NEW_BD_UNOP(instr) \
78 new_bd_##instr(dbg_info *db, ir_node *block, \
79 ir_node *op, ir_mode *mode) \
82 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an divop */
90 #define NEW_BD_DIVOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *memop, ir_node *op1, ir_node *op2) \
97 ir_graph *irg = current_ir_graph; \
101 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
102 res = optimize_node(res); \
103 IRN_VRFY_IRG(res, irg); \
107 /* creates a rd constructor for a binop */
108 #define NEW_RD_BINOP(instr) \
110 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
111 ir_node *op1, ir_node *op2, ir_mode *mode) \
114 ir_graph *rem = current_ir_graph; \
115 current_ir_graph = irg; \
116 res = new_bd_##instr(db, block, op1, op2, mode); \
117 current_ir_graph = rem; \
121 /* creates a rd constructor for an unop */
122 #define NEW_RD_UNOP(instr) \
124 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
125 ir_node *op, ir_mode *mode) \
128 ir_graph *rem = current_ir_graph; \
129 current_ir_graph = irg; \
130 res = new_bd_##instr(db, block, op, mode); \
131 current_ir_graph = rem; \
135 /* creates a rd constructor for an divop */
136 #define NEW_RD_DIVOP(instr) \
138 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
139 ir_node *memop, ir_node *op1, ir_node *op2) \
142 ir_graph *rem = current_ir_graph; \
143 current_ir_graph = irg; \
144 res = new_bd_##instr(db, block, memop, op1, op2); \
145 current_ir_graph = rem; \
149 /* creates a d constructor for an binop */
150 #define NEW_D_BINOP(instr) \
152 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
153 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
156 /* creates a d constructor for an unop */
157 #define NEW_D_UNOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
165 * Constructs a Block with a fixed number of predecessors.
166 * Does not set current_block. Can not be used with automatic
167 * Phi node construction.
/* Constructs a Block node with a fixed number of predecessors on the
   current_ir_graph.  Does not set current_block and cannot be used with
   automatic Phi construction.  The block is created already mature.
   NOTE(review): this listing elides interior lines; code lines below are
   reproduced verbatim. */
170 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
172 ir_graph *irg = current_ir_graph;
/* Blocks have no block of their own: block pointer is NULL. */
174 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
175 set_Block_matured(res, 1);
176 set_Block_block_visited(res, 0);
178 /* res->attr.block.exc = exc_normal; */
179 /* res->attr.block.handler_entry = 0; */
180 res->attr.block.dead = 0;
181 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
182 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view fields start out unset. */
183 res->attr.block.in_cg = NULL;
184 res->attr.block.cg_backedge = NULL;
185 res->attr.block.extblk = NULL;
187 IRN_VRFY_IRG(res, irg);
192 new_bd_Start(dbg_info *db, ir_node *block) {
194 ir_graph *irg = current_ir_graph;
196 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
197 /* res->attr.start.irg = irg; */
199 IRN_VRFY_IRG(res, irg);
204 new_bd_End(dbg_info *db, ir_node *block) {
206 ir_graph *irg = current_ir_graph;
208 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
210 IRN_VRFY_IRG(res, irg);
215 * Creates a Phi node with all predecessors. Calling this constructor
216 * is only allowed if the corresponding block is mature.
/* Constructs a Phi node with all predecessors given.  Only allowed once the
   corresponding block is mature (its in-array is final).
   NOTE(review): interior lines are elided in this listing. */
219 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
221 ir_graph *irg = current_ir_graph;
225 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block claims to be mature, its arity must match the Phi's. */
227 if ( get_Block_matured(block) )
228 assert( get_irn_arity(block) == arity );
230 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
232 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; optimizing a Phi that still has
   Unknown inputs could fold it away prematurely. */
234 for (i = arity-1; i >= 0; i--)
235 if (get_irn_op(in[i]) == op_Unknown) {
240 if (!has_unknown) res = optimize_node (res);
241 IRN_VRFY_IRG(res, irg);
243 /* Memory Phis in endless loops must be kept alive.
244 As we can't distinguish these easily we keep all of them alive. */
245 if ((res->op == op_Phi) && (mode == mode_M))
246 add_End_keepalive(get_irg_end(irg), res);
/* Constructs a typed Const node.  The 'block' parameter is deliberately
   ignored: constants always live in the start block so they dominate all
   uses. */
251 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
253 ir_graph *irg = current_ir_graph;
255 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
256 res->attr.con.tv = con;
257 set_Const_type(res, tp); /* Call method because of complex assertion. */
/* optimize_node may CSE this against an identical existing Const; the
   assertion below checks the surviving node still carries our type. */
258 res = optimize_node (res);
259 assert(get_Const_type(res) == tp);
260 IRN_VRFY_IRG(res, irg);
263 } /* new_bd_Const_type */
/* Constructs an untyped Const (type = firm_unknown_type).  Delegates to the
   rd variant with the current graph — intentional: the rd constructor does
   the graph bookkeeping. */
266 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
267 ir_graph *irg = current_ir_graph;
269 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: builds the tarval from a long first. */
273 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
274 ir_graph *irg = current_ir_graph;
276 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
277 } /* new_bd_Const_long */
280 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
282 ir_graph *irg = current_ir_graph;
284 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
285 res = optimize_node(res);
286 IRN_VRFY_IRG(res, irg);
291 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
294 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
297 res->attr.proj = proj;
300 assert(get_Proj_pred(res));
301 assert(get_nodes_block(get_Proj_pred(res)));
303 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond node.
   SIDE EFFECT: mutates the Cond argument itself — marks its kind as
   'fragmentary' and records max_proj as the default projection number. */
310 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
313 ir_graph *irg = current_ir_graph;
315 assert(arg->op == op_Cond);
316 arg->attr.cond.kind = fragmentary;
317 arg->attr.cond.default_proj = max_proj;
/* The default Proj is a control-flow projection, hence mode_X. */
318 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
320 } /* new_bd_defaultProj */
323 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
325 ir_graph *irg = current_ir_graph;
327 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
328 res->attr.conv.strict = strict_flag;
329 res = optimize_node(res);
330 IRN_VRFY_IRG(res, irg);
335 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
337 ir_graph *irg = current_ir_graph;
339 assert(is_atomic_type(to_tp));
341 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
342 res->attr.cast.totype = to_tp;
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
351 ir_graph *irg = current_ir_graph;
353 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
354 res = optimize_node (res);
355 IRN_VRFY_IRG(res, irg);
380 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
383 ir_graph *irg = current_ir_graph;
386 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
387 res = optimize_node(res);
388 IRN_VRFY_IRG(res, irg);
393 new_bd_Jmp(dbg_info *db, ir_node *block) {
395 ir_graph *irg = current_ir_graph;
397 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
398 res = optimize_node (res);
399 IRN_VRFY_IRG(res, irg);
404 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
406 ir_graph *irg = current_ir_graph;
408 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
409 res = optimize_node (res);
410 IRN_VRFY_IRG(res, irg);
412 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
418 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
420 ir_graph *irg = current_ir_graph;
422 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
423 res->attr.cond.kind = dense;
424 res->attr.cond.default_proj = 0;
425 res->attr.cond.pred = COND_JMP_PRED_NONE;
426 res = optimize_node (res);
427 IRN_VRFY_IRG(res, irg);
/* Constructs a Call node.  The real in-array r_in has layout
   [store, callee, arg0..argN-1]; hence r_arity = arity + 2 and the memcpy
   below starts at slot 2.
   NOTE(review): the lines building r_in[0]/r_in[1] are elided in this
   listing. */
432 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
433 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
437 ir_graph *irg = current_ir_graph;
/* r_in lives on the stack (alloca) — valid only for this construction. */
440 NEW_ARR_A(ir_node *, r_in, r_arity);
443 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
445 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type placeholder). */
447 assert((get_unknown_type() == tp) || is_Method_type(tp));
448 set_Call_type(res, tp);
449 res->attr.call.exc.pin_state = op_pin_state_pinned;
450 res->attr.call.callee_arr = NULL;
451 res = optimize_node(res);
452 IRN_VRFY_IRG(res, irg);
457 new_bd_Return(dbg_info *db, ir_node *block,
458 ir_node *store, int arity, ir_node **in) {
462 ir_graph *irg = current_ir_graph;
465 NEW_ARR_A (ir_node *, r_in, r_arity);
467 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
468 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
469 res = optimize_node(res);
470 IRN_VRFY_IRG(res, irg);
472 } /* new_bd_Return */
475 new_bd_Load(dbg_info *db, ir_node *block,
476 ir_node *store, ir_node *adr, ir_mode *mode) {
479 ir_graph *irg = current_ir_graph;
483 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
484 res->attr.load.exc.pin_state = op_pin_state_pinned;
485 res->attr.load.load_mode = mode;
486 res->attr.load.volatility = volatility_non_volatile;
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
493 new_bd_Store(dbg_info *db, ir_node *block,
494 ir_node *store, ir_node *adr, ir_node *val) {
497 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
503 res->attr.store.exc.pin_state = op_pin_state_pinned;
504 res->attr.store.volatility = volatility_non_volatile;
505 res = optimize_node(res);
506 IRN_VRFY_IRG(res, irg);
511 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
512 ir_node *size, ir_type *alloc_type, where_alloc where) {
515 ir_graph *irg = current_ir_graph;
519 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
520 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
521 res->attr.alloc.where = where;
522 res->attr.alloc.type = alloc_type;
523 res = optimize_node(res);
524 IRN_VRFY_IRG(res, irg);
529 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
530 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
533 ir_graph *irg = current_ir_graph;
538 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
539 res->attr.free.where = where;
540 res->attr.free.type = free_type;
541 res = optimize_node(res);
542 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel node selecting entity 'ent' from 'objptr' with 'arity'
   index operands.  Real in-array layout: [store, objptr, idx0..idxN-1].
   The result mode depends on what is selected: method entities yield code
   pointers, everything else data pointers. */
547 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
548 int arity, ir_node **in, ir_entity *ent) {
552 ir_graph *irg = current_ir_graph;
553 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
555 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
558 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
/* Index operands go after the two fixed slots (store, objptr). */
561 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
563 * Sel's can select functions which should be of mode mode_P_code.
565 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
566 res->attr.sel.ent = ent;
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
573 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
574 symconst_kind symkind, ir_type *tp) {
577 ir_graph *irg = current_ir_graph;
579 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
580 mode = mode_P_data; /* FIXME: can be mode_P_code */
584 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
586 res->attr.symc.num = symkind;
587 res->attr.symc.sym = value;
588 res->attr.symc.tp = tp;
590 res = optimize_node(res);
591 IRN_VRFY_IRG(res, irg);
593 } /* new_bd_SymConst_type */
596 new_bd_Sync(dbg_info *db, ir_node *block) {
598 ir_graph *irg = current_ir_graph;
600 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
601 /* no need to call optimize node here, Sync are always created with no predecessors */
602 IRN_VRFY_IRG(res, irg);
607 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
608 ir_node *in[2], *res;
609 ir_graph *irg = current_ir_graph;
613 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
614 res->attr.confirm_cmp = cmp;
615 res = optimize_node (res);
616 IRN_VRFY_IRG(res, irg);
618 } /* new_bd_Confirm */
621 new_bd_Unknown(ir_mode *m) {
623 ir_graph *irg = current_ir_graph;
625 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
626 res = optimize_node(res);
628 } /* new_bd_Unknown */
631 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
634 ir_graph *irg = current_ir_graph;
636 in[0] = get_Call_ptr(call);
637 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
638 /* res->attr.callbegin.irg = irg; */
639 res->attr.callbegin.call = call;
640 res = optimize_node(res);
641 IRN_VRFY_IRG(res, irg);
643 } /* new_bd_CallBegin */
646 new_bd_EndReg(dbg_info *db, ir_node *block) {
648 ir_graph *irg = current_ir_graph;
650 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
651 set_irg_end_reg(irg, res);
652 IRN_VRFY_IRG(res, irg);
654 } /* new_bd_EndReg */
657 new_bd_EndExcept(dbg_info *db, ir_node *block) {
659 ir_graph *irg = current_ir_graph;
661 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
662 set_irg_end_except(irg, res);
663 IRN_VRFY_IRG (res, irg);
665 } /* new_bd_EndExcept */
668 new_bd_Break(dbg_info *db, ir_node *block) {
670 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
673 res = optimize_node(res);
674 IRN_VRFY_IRG(res, irg);
679 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
682 ir_graph *irg = current_ir_graph;
684 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
685 res->attr.filter.proj = proj;
686 res->attr.filter.in_cg = NULL;
687 res->attr.filter.backedge = NULL;
690 assert(get_Proj_pred(res));
691 assert(get_nodes_block(get_Proj_pred(res)));
693 res = optimize_node(res);
694 IRN_VRFY_IRG(res, irg);
696 } /* new_bd_Filter */
699 new_bd_Mux(dbg_info *db, ir_node *block,
700 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
703 ir_graph *irg = current_ir_graph;
709 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
712 res = optimize_node(res);
713 IRN_VRFY_IRG(res, irg);
718 new_bd_Psi(dbg_info *db, ir_node *block,
719 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
722 ir_graph *irg = current_ir_graph;
725 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
727 for (i = 0; i < arity; ++i) {
729 in[2 * i + 1] = vals[i];
733 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
736 res = optimize_node(res);
737 IRN_VRFY_IRG(res, irg);
742 new_bd_CopyB(dbg_info *db, ir_node *block,
743 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
746 ir_graph *irg = current_ir_graph;
752 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
754 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
755 res->attr.copyb.data_type = data_type;
756 res = optimize_node(res);
757 IRN_VRFY_IRG(res, irg);
762 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
763 ir_node *objptr, ir_type *type) {
766 ir_graph *irg = current_ir_graph;
770 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
771 res->attr.instof.type = type;
772 res = optimize_node(res);
773 IRN_VRFY_IRG(res, irg);
775 } /* new_bd_InstOf */
778 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
781 ir_graph *irg = current_ir_graph;
785 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
786 res = optimize_node(res);
787 IRN_VRFY_IRG(res, irg);
792 new_bd_Bound(dbg_info *db, ir_node *block,
793 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
796 ir_graph *irg = current_ir_graph;
802 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
803 res->attr.bound.exc.pin_state = op_pin_state_pinned;
804 res = optimize_node(res);
805 IRN_VRFY_IRG(res, irg);
810 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
812 ir_graph *irg = current_ir_graph;
814 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
815 res = optimize_node(res);
816 IRN_VRFY_IRG(res, irg);
820 /* --------------------------------------------- */
821 /* private interfaces, for professional use only */
822 /* --------------------------------------------- */
824 /* Constructs a Block with a fixed number of predecessors.
825 Does not set current_block. Can not be used with automatic
826 Phi node construction. */
828 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
829 ir_graph *rem = current_ir_graph;
832 current_ir_graph = irg;
833 res = new_bd_Block(db, arity, in);
834 current_ir_graph = rem;
840 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
841 ir_graph *rem = current_ir_graph;
844 current_ir_graph = irg;
845 res = new_bd_Start(db, block);
846 current_ir_graph = rem;
852 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
854 ir_graph *rem = current_ir_graph;
856 current_ir_graph = rem;
857 res = new_bd_End(db, block);
858 current_ir_graph = rem;
863 /* Creates a Phi node with all predecessors. Calling this constructor
864 is only allowed if the corresponding block is mature. */
866 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
868 ir_graph *rem = current_ir_graph;
870 current_ir_graph = irg;
871 res = new_bd_Phi(db, block,arity, in, mode);
872 current_ir_graph = rem;
878 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
880 ir_graph *rem = current_ir_graph;
882 current_ir_graph = irg;
883 res = new_bd_Const_type(db, block, mode, con, tp);
884 current_ir_graph = rem;
887 } /* new_rd_Const_type */
890 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
892 ir_graph *rem = current_ir_graph;
894 current_ir_graph = irg;
895 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
896 current_ir_graph = rem;
902 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
903 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
904 } /* new_rd_Const_long */
907 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
909 ir_graph *rem = current_ir_graph;
911 current_ir_graph = irg;
912 res = new_bd_Id(db, block, val, mode);
913 current_ir_graph = rem;
919 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
922 ir_graph *rem = current_ir_graph;
924 current_ir_graph = irg;
925 res = new_bd_Proj(db, block, arg, mode, proj);
926 current_ir_graph = rem;
932 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
935 ir_graph *rem = current_ir_graph;
937 current_ir_graph = irg;
938 res = new_bd_defaultProj(db, block, arg, max_proj);
939 current_ir_graph = rem;
942 } /* new_rd_defaultProj */
945 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
947 ir_graph *rem = current_ir_graph;
949 current_ir_graph = irg;
950 res = new_bd_Conv(db, block, op, mode, 0);
951 current_ir_graph = rem;
957 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
959 ir_graph *rem = current_ir_graph;
961 current_ir_graph = irg;
962 res = new_bd_Cast(db, block, op, to_tp);
963 current_ir_graph = rem;
969 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
971 ir_graph *rem = current_ir_graph;
973 current_ir_graph = irg;
974 res = new_bd_Tuple(db, block, arity, in);
975 current_ir_graph = rem;
1001 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1002 ir_node *op1, ir_node *op2) {
1004 ir_graph *rem = current_ir_graph;
1006 current_ir_graph = irg;
1007 res = new_bd_Cmp(db, block, op1, op2);
1008 current_ir_graph = rem;
1014 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1016 ir_graph *rem = current_ir_graph;
1018 current_ir_graph = irg;
1019 res = new_bd_Jmp(db, block);
1020 current_ir_graph = rem;
1026 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1028 ir_graph *rem = current_ir_graph;
1030 current_ir_graph = irg;
1031 res = new_bd_IJmp(db, block, tgt);
1032 current_ir_graph = rem;
1038 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1040 ir_graph *rem = current_ir_graph;
1042 current_ir_graph = irg;
1043 res = new_bd_Cond(db, block, c);
1044 current_ir_graph = rem;
1050 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1051 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1053 ir_graph *rem = current_ir_graph;
1055 current_ir_graph = irg;
1056 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1057 current_ir_graph = rem;
1063 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1064 ir_node *store, int arity, ir_node **in) {
1066 ir_graph *rem = current_ir_graph;
1068 current_ir_graph = irg;
1069 res = new_bd_Return(db, block, store, arity, in);
1070 current_ir_graph = rem;
1073 } /* new_rd_Return */
1076 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1077 ir_node *store, ir_node *adr, ir_mode *mode) {
1079 ir_graph *rem = current_ir_graph;
1081 current_ir_graph = irg;
1082 res = new_bd_Load(db, block, store, adr, mode);
1083 current_ir_graph = rem;
1089 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1090 ir_node *store, ir_node *adr, ir_node *val) {
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_Store(db, block, store, adr, val);
1096 current_ir_graph = rem;
1099 } /* new_rd_Store */
1102 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1103 ir_node *size, ir_type *alloc_type, where_alloc where) {
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1109 current_ir_graph = rem;
1112 } /* new_rd_Alloc */
1115 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1116 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1118 ir_graph *rem = current_ir_graph;
1120 current_ir_graph = irg;
1121 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1122 current_ir_graph = rem;
1128 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1129 ir_node *store, ir_node *objptr, ir_entity *ent) {
1131 ir_graph *rem = current_ir_graph;
1133 current_ir_graph = irg;
1134 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1135 current_ir_graph = rem;
1138 } /* new_rd_simpleSel */
1141 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1142 int arity, ir_node **in, ir_entity *ent) {
1144 ir_graph *rem = current_ir_graph;
1146 current_ir_graph = irg;
1147 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1148 current_ir_graph = rem;
1154 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1155 symconst_kind symkind, ir_type *tp) {
1157 ir_graph *rem = current_ir_graph;
1159 current_ir_graph = irg;
1160 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1161 current_ir_graph = rem;
1164 } /* new_rd_SymConst_type */
1167 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1168 symconst_kind symkind) {
1169 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1171 } /* new_rd_SymConst */
1173 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1174 symconst_symbol sym;
1175 sym.entity_p = symbol;
1176 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1177 } /* new_rd_SymConst_addr_ent */
1179 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1180 symconst_symbol sym;
1181 sym.entity_p = symbol;
1182 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1183 } /* new_rd_SymConst_ofs_ent */
1185 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1186 symconst_symbol sym;
1187 sym.ident_p = symbol;
1188 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1189 } /* new_rd_SymConst_addr_name */
1191 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1192 symconst_symbol sym;
1193 sym.type_p = symbol;
1194 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1195 } /* new_rd_SymConst_type_tag */
1197 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1198 symconst_symbol sym;
1199 sym.type_p = symbol;
1200 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1201 } /* new_rd_SymConst_size */
1203 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1204 symconst_symbol sym;
1205 sym.type_p = symbol;
1206 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1207 } /* new_rd_SymConst_align */
/* Constructs a Sync node with 'arity' memory predecessors in graph 'irg'.
   The Sync is created empty (the bd constructor takes no predecessors) and
   the operands are appended afterwards via add_Sync_pred — note this
   happens AFTER current_ir_graph has been restored to 'rem'. */
1210 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1212 ir_graph *rem = current_ir_graph;
1215 current_ir_graph = irg;
1216 res = new_bd_Sync(db, block);
1217 current_ir_graph = rem;
1219 for (i = 0; i < arity; ++i)
1220 add_Sync_pred(res, in[i]);
1226 new_rd_Bad(ir_graph *irg) {
1227 return get_irg_bad(irg);
1231 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1233 ir_graph *rem = current_ir_graph;
1235 current_ir_graph = irg;
1236 res = new_bd_Confirm(db, block, val, bound, cmp);
1237 current_ir_graph = rem;
1240 } /* new_rd_Confirm */
1243 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1245 ir_graph *rem = current_ir_graph;
1247 current_ir_graph = irg;
1248 res = new_bd_Unknown(m);
1249 current_ir_graph = rem;
1252 } /* new_rd_Unknown */
1255 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1257 ir_graph *rem = current_ir_graph;
1259 current_ir_graph = irg;
1260 res = new_bd_CallBegin(db, block, call);
1261 current_ir_graph = rem;
1264 } /* new_rd_CallBegin */
1267 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1270 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1271 set_irg_end_reg(irg, res);
1272 IRN_VRFY_IRG(res, irg);
1274 } /* new_rd_EndReg */
1277 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1280 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1281 set_irg_end_except(irg, res);
1282 IRN_VRFY_IRG (res, irg);
1284 } /* new_rd_EndExcept */
1287 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1289 ir_graph *rem = current_ir_graph;
1291 current_ir_graph = irg;
1292 res = new_bd_Break(db, block);
1293 current_ir_graph = rem;
1296 } /* new_rd_Break */
1299 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_Filter(db, block, arg, mode, proj);
1306 current_ir_graph = rem;
1309 } /* new_rd_Filter */
1312 new_rd_NoMem(ir_graph *irg) {
1313 return get_irg_no_mem(irg);
1314 } /* new_rd_NoMem */
1317 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1318 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1320 ir_graph *rem = current_ir_graph;
1322 current_ir_graph = irg;
1323 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1324 current_ir_graph = rem;
1330 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1331 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1333 ir_graph *rem = current_ir_graph;
1335 current_ir_graph = irg;
1336 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1337 current_ir_graph = rem;
1342 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1343 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1345 ir_graph *rem = current_ir_graph;
1347 current_ir_graph = irg;
1348 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1349 current_ir_graph = rem;
1352 } /* new_rd_CopyB */
1355 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1356 ir_node *objptr, ir_type *type) {
1358 ir_graph *rem = current_ir_graph;
1360 current_ir_graph = irg;
1361 res = new_bd_InstOf(db, block, store, objptr, type);
1362 current_ir_graph = rem;
1365 } /* new_rd_InstOf */
1368 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1370 ir_graph *rem = current_ir_graph;
1372 current_ir_graph = irg;
1373 res = new_bd_Raise(db, block, store, obj);
1374 current_ir_graph = rem;
1377 } /* new_rd_Raise */
1379 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1380 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1382 ir_graph *rem = current_ir_graph;
1384 current_ir_graph = irg;
1385 res = new_bd_Bound(db, block, store, idx, lower, upper);
1386 current_ir_graph = rem;
1389 } /* new_rd_Bound */
1391 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1393 ir_graph *rem = current_ir_graph;
1395 current_ir_graph = irg;
1396 res = new_bd_Pin(db, block, node);
1397 current_ir_graph = rem;
1402 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1403 return new_rd_Block(NULL, irg, arity, in);
1405 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1406 return new_rd_Start(NULL, irg, block);
1408 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1409 return new_rd_End(NULL, irg, block);
1411 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1412 return new_rd_Jmp(NULL, irg, block);
1414 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1415 return new_rd_IJmp(NULL, irg, block, tgt);
1417 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1418 return new_rd_Cond(NULL, irg, block, c);
1420 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1421 ir_node *store, int arity, ir_node **in) {
1422 return new_rd_Return(NULL, irg, block, store, arity, in);
1424 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1425 ir_mode *mode, tarval *con) {
1426 return new_rd_Const(NULL, irg, block, mode, con);
1428 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1429 ir_mode *mode, long value) {
1430 return new_rd_Const_long(NULL, irg, block, mode, value);
1432 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1433 ir_mode *mode, tarval *con, ir_type *tp) {
1434 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1436 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1437 symconst_symbol value, symconst_kind symkind) {
1438 return new_rd_SymConst(NULL, irg, block, value, symkind);
1440 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1441 ir_node *objptr, ir_entity *ent) {
1442 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1444 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1445 ir_node *objptr, int n_index, ir_node **index,
1447 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1449 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1450 ir_node *callee, int arity, ir_node **in,
1452 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1454 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1455 ir_node *op1, ir_node *op2, ir_mode *mode) {
1456 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1458 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1459 ir_node *op1, ir_node *op2, ir_mode *mode) {
1460 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1462 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1463 ir_node *op, ir_mode *mode) {
1464 return new_rd_Minus(NULL, irg, block, op, mode);
1466 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1467 ir_node *op1, ir_node *op2, ir_mode *mode) {
1468 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1470 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1471 ir_node *memop, ir_node *op1, ir_node *op2) {
1472 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1474 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1475 ir_node *memop, ir_node *op1, ir_node *op2) {
1476 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1478 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1479 ir_node *memop, ir_node *op1, ir_node *op2) {
1480 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1482 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1483 ir_node *memop, ir_node *op1, ir_node *op2) {
1484 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1486 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1487 ir_node *op, ir_mode *mode) {
1488 return new_rd_Abs(NULL, irg, block, op, mode);
1490 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1491 ir_node *op1, ir_node *op2, ir_mode *mode) {
1492 return new_rd_And(NULL, irg, block, op1, op2, mode);
1494 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1495 ir_node *op1, ir_node *op2, ir_mode *mode) {
1496 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1498 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1499 ir_node *op1, ir_node *op2, ir_mode *mode) {
1500 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1502 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1503 ir_node *op, ir_mode *mode) {
1504 return new_rd_Not(NULL, irg, block, op, mode);
1506 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1507 ir_node *op, ir_node *k, ir_mode *mode) {
1508 return new_rd_Shl(NULL, irg, block, op, k, mode);
1510 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1511 ir_node *op, ir_node *k, ir_mode *mode) {
1512 return new_rd_Shr(NULL, irg, block, op, k, mode);
1514 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1515 ir_node *op, ir_node *k, ir_mode *mode) {
1516 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1518 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1519 ir_node *op, ir_node *k, ir_mode *mode) {
1520 return new_rd_Rot(NULL, irg, block, op, k, mode);
1522 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1523 ir_node *op, ir_node *k, ir_mode *mode) {
1524 return new_rd_Carry(NULL, irg, block, op, k, mode);
1526 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1527 ir_node *op, ir_node *k, ir_mode *mode) {
1528 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1530 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1531 ir_node *op1, ir_node *op2) {
1532 return new_rd_Cmp(NULL, irg, block, op1, op2);
1534 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1535 ir_node *op, ir_mode *mode) {
1536 return new_rd_Conv(NULL, irg, block, op, mode);
1538 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1539 return new_rd_Cast(NULL, irg, block, op, to_tp);
1541 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1542 ir_node **in, ir_mode *mode) {
1543 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1545 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1546 ir_node *store, ir_node *adr, ir_mode *mode) {
1547 return new_rd_Load(NULL, irg, block, store, adr, mode);
1549 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1550 ir_node *store, ir_node *adr, ir_node *val) {
1551 return new_rd_Store(NULL, irg, block, store, adr, val);
1553 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1554 ir_node *size, ir_type *alloc_type, where_alloc where) {
1555 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1557 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1558 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1559 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1561 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1562 return new_rd_Sync(NULL, irg, block, arity, in);
1564 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1565 ir_mode *mode, long proj) {
1566 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1568 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1570 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1572 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1573 int arity, ir_node **in) {
1574 return new_rd_Tuple(NULL, irg, block, arity, in );
1576 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1577 ir_node *val, ir_mode *mode) {
1578 return new_rd_Id(NULL, irg, block, val, mode);
1580 ir_node *new_r_Bad(ir_graph *irg) {
1581 return new_rd_Bad(irg);
1583 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1584 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1586 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1587 return new_rd_Unknown(irg, m);
1589 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1590 return new_rd_CallBegin(NULL, irg, block, callee);
1592 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1593 return new_rd_EndReg(NULL, irg, block);
1595 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1596 return new_rd_EndExcept(NULL, irg, block);
1598 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1599 return new_rd_Break(NULL, irg, block);
1601 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1602 ir_mode *mode, long proj) {
1603 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1605 ir_node *new_r_NoMem(ir_graph *irg) {
1606 return new_rd_NoMem(irg);
1608 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1609 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1610 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1612 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1613 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1614 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1616 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1617 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1618 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1620 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1622 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1624 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1625 ir_node *store, ir_node *obj) {
1626 return new_rd_Raise(NULL, irg, block, store, obj);
1628 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1629 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1630 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1632 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1633 return new_rd_Pin(NULL, irg, block, node);
1636 /** ********************/
1637 /** public interfaces */
1638 /** construction tools */
1642 * - create a new Start node in the current block
1644 * @return s - pointer to the created Start node
1649 new_d_Start(dbg_info *db) {
1652 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1653 op_Start, mode_T, 0, NULL);
1655 res = optimize_node(res);
1656 IRN_VRFY_IRG(res, current_ir_graph);
1661 new_d_End(dbg_info *db) {
1663 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1664 op_End, mode_X, -1, NULL);
1665 res = optimize_node(res);
1666 IRN_VRFY_IRG(res, current_ir_graph);
1671 /* Constructs a Block with a fixed number of predecessors.
1672 Does set current_block. Can be used with automatic Phi
1673 node construction. */
1675 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1678 int has_unknown = 0;
1680 res = new_bd_Block(db, arity, in);
1682 /* Create and initialize array for Phi-node construction. */
1683 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1684 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1685 current_ir_graph->n_loc);
1686 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1689 for (i = arity-1; i >= 0; i--)
1690 if (get_irn_op(in[i]) == op_Unknown) {
1695 if (!has_unknown) res = optimize_node(res);
1696 current_ir_graph->current_block = res;
1698 IRN_VRFY_IRG(res, current_ir_graph);
1703 /* ***********************************************************************/
1704 /* Methods necessary for automatic Phi node creation */
1706 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1707 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1708 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1709 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1711 Call Graph: ( A ---> B == A "calls" B)
1713 get_value mature_immBlock
1721 get_r_value_internal |
1725 new_rd_Phi0 new_rd_Phi_in
1727 * *************************************************************************** */
1729 /** Creates a Phi node with 0 predecessors. */
1730 static INLINE ir_node *
1731 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1734 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1735 IRN_VRFY_IRG(res, irg);
1739 /* There are two implementations of the Phi node construction. The first
1740 is faster, but does not work for blocks with more than 2 predecessors.
1741 The second works always but is slower and causes more unnecessary Phi
1743 Select the implementations by the following preprocessor flag set in
1745 #if USE_FAST_PHI_CONSTRUCTION
1747 /* This is a stack used for allocating and deallocating nodes in
1748 new_rd_Phi_in. The original implementation used the obstack
1749 to model this stack, now it is explicit. This reduces side effects.
1751 #if USE_EXPLICIT_PHI_IN_STACK
1753 new_Phi_in_stack(void) {
1756 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1758 res->stack = NEW_ARR_F (ir_node *, 0);
1762 } /* new_Phi_in_stack */
1765 free_Phi_in_stack(Phi_in_stack *s) {
1766 DEL_ARR_F(s->stack);
1768 } /* free_Phi_in_stack */
1771 free_to_Phi_in_stack(ir_node *phi) {
1772 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1773 current_ir_graph->Phi_in_stack->pos)
1774 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1776 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1778 (current_ir_graph->Phi_in_stack->pos)++;
1779 } /* free_to_Phi_in_stack */
1781 static INLINE ir_node *
1782 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1783 int arity, ir_node **in) {
1785 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1786 int pos = current_ir_graph->Phi_in_stack->pos;
1790 /* We need to allocate a new node */
1791 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1792 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1794 /* reuse the old node and initialize it again. */
1797 assert(res->kind == k_ir_node);
1798 assert(res->op == op_Phi);
1803 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1804 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1806 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1808 (current_ir_graph->Phi_in_stack->pos)--;
1811 } /* alloc_or_pop_from_Phi_in_stack */
1812 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1815 * Creates a Phi node with a given, fixed array **in of predecessors.
1816 * If the Phi node is unnecessary, as the same value reaches the block
1817 * through all control flow paths, it is eliminated and the value
1818 * returned directly. This constructor is only intended for use in
1819 * the automatic Phi node generation triggered by get_value or mature.
1820 * The implementation is quite tricky and depends on the fact, that
1821 * the nodes are allocated on a stack:
1822 * The in array contains predecessors and NULLs. The NULLs appear,
1823 * if get_r_value_internal, that computed the predecessors, reached
1824 * the same block on two paths. In this case the same value reaches
1825 * this block on both paths, there is no definition in between. We need
1826 * not allocate a Phi where these paths merge, but we have to communicate
1827 * this fact to the caller. This happens by returning a pointer to the
1828 * node the caller _will_ allocate. (Yes, we predict the address. We can
1829 * do so because the nodes are allocated on the obstack.) The caller then
1830 * finds a pointer to itself and, when this routine is called again,
1831 * eliminates itself.
1833 static INLINE ir_node *
1834 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1836 ir_node *res, *known;
1838 /* Allocate a new node on the obstack. This can return a node to
1839 which some of the pointers in the in-array already point.
1840 Attention: the constructor copies the in array, i.e., the later
1841 changes to the array in this routine do not affect the
1842 constructed node! If the in array contains NULLs, there will be
1843 missing predecessors in the returned node. Is this a possible
1844 internal state of the Phi node generation? */
1845 #if USE_EXPLICIT_PHI_IN_STACK
1846 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1848 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1849 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1852 /* The in-array can contain NULLs. These were returned by
1853 get_r_value_internal if it reached the same block/definition on a
1854 second path. The NULLs are replaced by the node itself to
1855 simplify the test in the next loop. */
1856 for (i = 0; i < ins; ++i) {
1861 /* This loop checks whether the Phi has more than one predecessor.
1862 If so, it is a real Phi node and we break the loop. Else the Phi
1863 node merges the same definition on several paths and therefore is
1865 for (i = 0; i < ins; ++i) {
1866 if (in[i] == res || in[i] == known)
1875 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1877 #if USE_EXPLICIT_PHI_IN_STACK
1878 free_to_Phi_in_stack(res);
1880 edges_node_deleted(res, current_ir_graph);
1881 obstack_free(current_ir_graph->obst, res);
1885 res = optimize_node (res);
1886 IRN_VRFY_IRG(res, irg);
1889 /* return the pointer to the Phi node. This node might be deallocated! */
1891 } /* new_rd_Phi_in */
1894 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1897 * Allocates and returns this node. The routine called to allocate the
1898 * node might optimize it away and return a real value, or even a pointer
1899 * to a deallocated Phi node on top of the obstack!
1900 * This function is called with an in-array of proper size.
1903 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1904 ir_node *prevBlock, *res;
1907 /* This loop goes to all predecessor blocks of the block the Phi node is in
1908 and there finds the operands of the Phi node by calling
1909 get_r_value_internal. */
1910 for (i = 1; i <= ins; ++i) {
1911 assert (block->in[i]);
1912 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1914 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1917 /* After collecting all predecessors into the array nin a new Phi node
1918 with these predecessors is created. This constructor contains an
1919 optimization: If all predecessors of the Phi node are identical it
1920 returns the only operand instead of a new Phi node. If the value
1921 passes two different control flow edges without being defined, and
1922 this is the second path treated, a pointer to the node that will be
1923 allocated for the first path (recursion) is returned. We already
1924 know the address of this node, as it is the next node to be allocated
1925 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1926 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1928 /* Now we know the value for "pos" and can enter it in the array with
1929 all known local variables. Attention: this might be a pointer to
1930 a node, that later will be allocated!!! See new_rd_Phi_in().
1931 If this is called in mature, after some set_value() in the same block,
1932 the proper value must not be overwritten:
1934 get_value (makes Phi0, put's it into graph_arr)
1935 set_value (overwrites Phi0 in graph_arr)
1936 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1939 if (!block->attr.block.graph_arr[pos]) {
1940 block->attr.block.graph_arr[pos] = res;
1942 /* printf(" value already computed by %s\n",
1943 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1950 * This function returns the last definition of a variable. In case
1951 * this variable was last defined in a previous block, Phi nodes are
1952 * inserted. If the part of the firm graph containing the definition
1953 * is not yet constructed, a dummy Phi node is returned.
1956 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1959 /* There are 4 cases to treat.
1961 1. The block is not mature and we visit it the first time. We can not
1962 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1963 predecessors is returned. This node is added to the linked list (field
1964 "link") of the containing block to be completed when this block is
1965 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1968 2. The value is already known in this block, graph_arr[pos] is set and we
1969 visit the block the first time. We can return the value without
1970 creating any new nodes.
1972 3. The block is mature and we visit it the first time. A Phi node needs
1973 to be created (phi_merge). If the Phi is not needed, as all it's
1974 operands are the same value reaching the block through different
1975 paths, it's optimized away and the value itself is returned.
1977 4. The block is mature, and we visit it the second time. Now two
1978 subcases are possible:
1979 * The value was computed completely the last time we were here. This
1980 is the case if there is no loop. We can return the proper value.
1981 * The recursion that visited this node and set the flag did not
1982 return yet. We are computing a value in a loop and need to
1983 break the recursion without knowing the result yet.
1984 @@@ strange case. Straight forward we would create a Phi before
1985 starting the computation of it's predecessors. In this case we will
1986 find a Phi here in any case. The problem is that this implementation
1987 only creates a Phi after computing the predecessors, so that it is
1988 hard to compute self references of this Phi. @@@
1989 There is no simple check for the second subcase. Therefore we check
1990 for a second visit and treat all such cases as the second subcase.
1991 Anyways, the basic situation is the same: we reached a block
1992 on two paths without finding a definition of the value: No Phi
1993 nodes are needed on both paths.
1994 We return this information "Two paths, no Phi needed" by a very tricky
1995 implementation that relies on the fact that an obstack is a stack and
1996 will return a node with the same address on different allocations.
1997 Look also at phi_merge and new_rd_phi_in to understand this.
1998 @@@ Unfortunately this does not work, see testprogram
1999 three_cfpred_example.
2003 /* case 4 -- already visited. */
2004 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2006 /* visited the first time */
2007 set_irn_visited(block, get_irg_visited(current_ir_graph));
2009 /* Get the local valid value */
2010 res = block->attr.block.graph_arr[pos];
2012 /* case 2 -- If the value is actually computed, return it. */
2013 if (res) return res;
2015 if (block->attr.block.matured) { /* case 3 */
2017 /* The Phi has the same amount of ins as the corresponding block. */
2018 int ins = get_irn_arity(block);
2020 NEW_ARR_A(ir_node *, nin, ins);
2022 /* Phi merge collects the predecessors and then creates a node. */
2023 res = phi_merge(block, pos, mode, nin, ins);
2025 } else { /* case 1 */
2026 /* The block is not mature, we don't know how many in's are needed. A Phi
2027 with zero predecessors is created. Such a Phi node is called Phi0
2028 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2029 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2031 The Phi0 has to remember the pos of it's internal value. If the real
2032 Phi is computed, pos is used to update the array with the local
2035 res = new_rd_Phi0(current_ir_graph, block, mode);
2036 res->attr.phi0_pos = pos;
2037 res->link = block->link;
2041 /* If we get here, the frontend missed a use-before-definition error */
2044 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2045 assert(mode->code >= irm_F && mode->code <= irm_P);
2046 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2047 tarval_mode_null[mode->code]);
2050 /* The local valid value is available now. */
2051 block->attr.block.graph_arr[pos] = res;
2054 } /* get_r_value_internal */
2059 it starts the recursion. This causes an Id at the entry of
2060 every block that has no definition of the value! **/
2062 #if USE_EXPLICIT_PHI_IN_STACK
2064 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2065 void free_Phi_in_stack(Phi_in_stack *s) {}
2068 static INLINE ir_node *
2069 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2070 ir_node **in, int ins, ir_node *phi0) {
2072 ir_node *res, *known;
2074 /* Allocate a new node on the obstack. The allocation copies the in
2076 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2077 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2079 /* This loop checks whether the Phi has more than one predecessor.
2080 If so, it is a real Phi node and we break the loop. Else the
2081 Phi node merges the same definition on several paths and therefore
2082 is not needed. Don't consider Bad nodes! */
2084 for (i=0; i < ins; ++i)
2088 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2090 /* Optimize self referencing Phis: We can't detect them yet properly, as
2091 they still refer to the Phi0 they will replace. So replace right now. */
2092 if (phi0 && in[i] == phi0) in[i] = res;
2094 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2102 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2105 edges_node_deleted(res, current_ir_graph);
2106 obstack_free (current_ir_graph->obst, res);
2107 if (is_Phi(known)) {
2108 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2109 order, an enclosing Phi node may become superfluous. */
2110 res = optimize_in_place_2(known);
2112 exchange(known, res);
2118 /* A undefined value, e.g., in unreachable code. */
2122 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2123 IRN_VRFY_IRG(res, irg);
2124 /* Memory Phis in endless loops must be kept alive.
2125 As we can't distinguish these easily we keep all of them alive. */
2126 if ((res->op == op_Phi) && (mode == mode_M))
2127 add_End_keepalive(get_irg_end(irg), res);
2131 } /* new_rd_Phi_in */
2134 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2136 #if PRECISE_EXC_CONTEXT
2138 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2141 * Construct a new frag_array for node n.
2142 * Copy the content from the current graph_arr of the corresponding block:
2143 * this is the current state.
2144 * Set ProjM(n) as current memory state.
2145 * Further the last entry in frag_arr of current block points to n. This
2146 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2148 static INLINE ir_node **new_frag_arr(ir_node *n) {
2152 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2153 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2154 sizeof(ir_node *)*current_ir_graph->n_loc);
2156 /* turn off optimization before allocating Proj nodes, as res isn't
2158 opt = get_opt_optimize(); set_optimize(0);
2159 /* Here we rely on the fact that all frag ops have Memory as first result! */
2160 if (get_irn_op(n) == op_Call)
2161 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2162 else if (get_irn_op(n) == op_CopyB)
2163 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2165 assert((pn_Quot_M == pn_DivMod_M) &&
2166 (pn_Quot_M == pn_Div_M) &&
2167 (pn_Quot_M == pn_Mod_M) &&
2168 (pn_Quot_M == pn_Load_M) &&
2169 (pn_Quot_M == pn_Store_M) &&
2170 (pn_Quot_M == pn_Alloc_M) &&
2171 (pn_Quot_M == pn_Bound_M));
2172 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2176 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2178 } /* new_frag_arr */
2181 * Returns the frag_arr from a node.
2183 static INLINE ir_node **get_frag_arr(ir_node *n) {
2184 switch (get_irn_opcode(n)) {
2186 return n->attr.call.exc.frag_arr;
2188 return n->attr.alloc.exc.frag_arr;
2190 return n->attr.load.exc.frag_arr;
2192 return n->attr.store.exc.frag_arr;
2194 return n->attr.except.frag_arr;
2196 } /* get_frag_arr */
2199 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2201 if (!frag_arr[pos]) frag_arr[pos] = val;
2202 if (frag_arr[current_ir_graph->n_loc - 1]) {
2203 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2204 assert(arr != frag_arr && "Endless recursion detected");
2205 set_frag_value(arr, pos, val);
2210 for (i = 0; i < 1000; ++i) {
2211 if (!frag_arr[pos]) {
2212 frag_arr[pos] = val;
2214 if (frag_arr[current_ir_graph->n_loc - 1]) {
2215 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2221 assert(0 && "potential endless recursion");
2223 } /* set_frag_value */
2226 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2230 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2232 frag_arr = get_frag_arr(cfOp);
2233 res = frag_arr[pos];
2235 if (block->attr.block.graph_arr[pos]) {
2236 /* There was a set_value() after the cfOp and no get_value before that
2237 set_value(). We must build a Phi node now. */
2238 if (block->attr.block.matured) {
2239 int ins = get_irn_arity(block);
2241 NEW_ARR_A (ir_node *, nin, ins);
2242 res = phi_merge(block, pos, mode, nin, ins);
2244 res = new_rd_Phi0 (current_ir_graph, block, mode);
2245 res->attr.phi0_pos = pos;
2246 res->link = block->link;
2250 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2251 but this should be better: (remove comment if this works) */
2252 /* It's a Phi, we can write this into all graph_arrs with NULL */
2253 set_frag_value(block->attr.block.graph_arr, pos, res);
2255 res = get_r_value_internal(block, pos, mode);
2256 set_frag_value(block->attr.block.graph_arr, pos, res);
2260 } /* get_r_frag_value_internal */
2261 #endif /* PRECISE_EXC_CONTEXT */
2264 * Computes the predecessors for the real phi node, and then
2265 * allocates and returns this node. The routine called to allocate the
2266 * node might optimize it away and return a real value.
2267 * This function must be called with an in-array of proper size.
2270 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2271 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2274 /* If this block has no value at pos create a Phi0 and remember it
2275 in graph_arr to break recursions.
2276 Else we may not set graph_arr as there a later value is remembered. */
2278 if (!block->attr.block.graph_arr[pos]) {
2279 if (block == get_irg_start_block(current_ir_graph)) {
2280 /* Collapsing to Bad tarvals is no good idea.
2281 So we call a user-supplied routine here that deals with this case as
2282 appropriate for the given language. Sorrily the only help we can give
2283 here is the position.
2285 Even if all variables are defined before use, it can happen that
2286 we get to the start block, if a Cond has been replaced by a tuple
2287 (bad, jmp). In this case we call the function needlessly, eventually
2288 generating a non-existent error.
2289 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2292 if (default_initialize_local_variable) {
2293 ir_node *rem = get_cur_block();
2295 set_cur_block(block);
2296 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2300 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2301 /* We don't need to care about exception ops in the start block.
2302 There are none by definition. */
2303 return block->attr.block.graph_arr[pos];
2305 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2306 block->attr.block.graph_arr[pos] = phi0;
2307 #if PRECISE_EXC_CONTEXT
2308 if (get_opt_precise_exc_context()) {
2309 /* Set graph_arr for fragile ops. Also here we should break recursion.
2310 We could choose a cyclic path through an cfop. But the recursion would
2311 break at some point. */
2312 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2318 /* This loop goes to all predecessor blocks of the block the Phi node
2319 is in and there finds the operands of the Phi node by calling
2320 get_r_value_internal. */
2321 for (i = 1; i <= ins; ++i) {
2322 prevCfOp = skip_Proj(block->in[i]);
2324 if (is_Bad(prevCfOp)) {
2325 /* In case a Cond has been optimized we would get right to the start block
2326 with an invalid definition. */
2327 nin[i-1] = new_Bad();
2330 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2332 if (!is_Bad(prevBlock)) {
2333 #if PRECISE_EXC_CONTEXT
2334 if (get_opt_precise_exc_context() &&
2335 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2336 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2337 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2340 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2342 nin[i-1] = new_Bad();
2346 /* We want to pass the Phi0 node to the constructor: this finds additional
2347 optimization possibilities.
2348 The Phi0 node either is allocated in this function, or it comes from
2349 a former call to get_r_value_internal. In this case we may not yet
2350 exchange phi0, as this is done in mature_immBlock. */
2352 phi0_all = block->attr.block.graph_arr[pos];
2353 if (!((get_irn_op(phi0_all) == op_Phi) &&
2354 (get_irn_arity(phi0_all) == 0) &&
2355 (get_nodes_block(phi0_all) == block)))
2361 /* After collecting all predecessors into the array nin a new Phi node
2362 with these predecessors is created. This constructor contains an
2363 optimization: If all predecessors of the Phi node are identical it
2364 returns the only operand instead of a new Phi node. */
2365 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2367 /* In case we allocated a Phi0 node at the beginning of this procedure,
2368 we need to exchange this Phi0 with the real Phi. */
2370 exchange(phi0, res);
2371 block->attr.block.graph_arr[pos] = res;
2372 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2373 only an optimization. */
2380 * This function returns the last definition of a variable. In case
2381 * this variable was last defined in a previous block, Phi nodes are
2382 * inserted. If the part of the firm graph containing the definition
2383 * is not yet constructed, a dummy Phi node is returned.
 * @param block  the Block in which the value is searched for
 * @param pos    index of the variable in the block's value array (graph_arr)
 * @param mode   the mode of the requested value
2386 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
/* NOTE(review): this excerpt elides a few lines (e.g. the declarations of
   res and nin); consult the full file before editing. */
2388 /* There are 4 cases to treat.
2390 1. The block is not mature and we visit it the first time. We can not
2391 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2392 predecessors is returned. This node is added to the linked list (field
2393 "link") of the containing block to be completed when this block is
2394 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2397 2. The value is already known in this block, graph_arr[pos] is set and we
2398 visit the block the first time. We can return the value without
2399 creating any new nodes.
2401 3. The block is mature and we visit it the first time. A Phi node needs
2402 to be created (phi_merge). If the Phi is not needed, as all it's
2403 operands are the same value reaching the block through different
2404 paths, it's optimized away and the value itself is returned.
2406 4. The block is mature, and we visit it the second time. Now two
2407 subcases are possible:
2408 * The value was computed completely the last time we were here. This
2409 is the case if there is no loop. We can return the proper value.
2410 * The recursion that visited this node and set the flag did not
2411 return yet. We are computing a value in a loop and need to
2412 break the recursion. This case only happens if we visited
2413 the same block with phi_merge before, which inserted a Phi0.
2414 So we return the Phi0.
2417 /* case 4 -- already visited. */
2418 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2419 /* As phi_merge allocates a Phi0 this value is always defined. Here
2420 is the critical difference of the two algorithms. */
2421 assert(block->attr.block.graph_arr[pos]);
2422 return block->attr.block.graph_arr[pos];
2425 /* visited the first time */
2426 set_irn_visited(block, get_irg_visited(current_ir_graph));
2428 /* Get the local valid value */
2429 res = block->attr.block.graph_arr[pos];
2431 /* case 2 -- If the value is actually computed, return it. */
2432 if (res) { return res; };
2434 if (block->attr.block.matured) { /* case 3 */
2436 /* The Phi has the same amount of ins as the corresponding block. */
2437 int ins = get_irn_arity(block);
2439 NEW_ARR_A (ir_node *, nin, ins);
2441 /* Phi merge collects the predecessors and then creates a node. */
2442 res = phi_merge (block, pos, mode, nin, ins);
2444 } else { /* case 1 */
2445 /* The block is not mature, we don't know how many in's are needed. A Phi
2446 with zero predecessors is created. Such a Phi node is called Phi0
2447 node. The Phi0 is then added to the list of Phi0 nodes in this block
2448 to be matured by mature_immBlock later.
2449 The Phi0 has to remember the pos of it's internal value. If the real
2450 Phi is computed, pos is used to update the array with the local
2452 res = new_rd_Phi0 (current_ir_graph, block, mode);
2453 res->attr.phi0_pos = pos;
2454 res->link = block->link;
2458 /* If we get here, the frontend missed a use-before-definition error */
2461 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2462 assert(mode->code >= irm_F && mode->code <= irm_P);
/* fall back to a zero constant of the requested mode */
2463 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2464 get_mode_null(mode));
2467 /* The local valid value is available now. */
2468 block->attr.block.graph_arr[pos] = res;
2471 } /* get_r_value_internal */
2473 #endif /* USE_FAST_PHI_CONSTRUCTION */
2475 /* ************************************************************************** */
2478 * Finalize a Block node, when all control flows are known.
2479 * Acceptable parameters are only Block nodes.
 * @param block  the immature Block whose predecessor list is now complete
2482 mature_immBlock(ir_node *block) {
/* NOTE(review): declarations of ins, nin, n and next are elided in this excerpt. */
2487 assert(get_irn_opcode(block) == iro_Block);
2488 /* @@@ should be commented in
2489 assert (!get_Block_matured(block) && "Block already matured"); */
2491 if (!get_Block_matured(block)) {
2492 ins = ARR_LEN (block->in)-1;
2493 /* Fix block parameters */
2494 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2496 /* An array for building the Phi nodes. */
2497 NEW_ARR_A (ir_node *, nin, ins);
2499 /* Traverse a chain of Phi nodes attached to this block and mature
2501 for (n = block->link; n; n = next) {
2502 inc_irg_visited(current_ir_graph);
/* replace each Phi0 by a proper Phi over all predecessors */
2504 exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2507 block->attr.block.matured = 1;
2509 /* Now, as the block is a finished firm node, we can optimize it.
2510 Since other nodes have been allocated since the block was created
2511 we can not free the node on the obstack. Therefore we have to call
2513 Unfortunately the optimization does not change a lot, as all allocated
2514 nodes refer to the unoptimized node.
2515 We can call _2, as global cse has no effect on blocks. */
2516 block = optimize_in_place_2(block);
2517 IRN_VRFY_IRG(block, current_ir_graph);
2519 } /* mature_immBlock */
2522 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2523 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2527 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2528 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2532 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2533 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2534 } /* new_d_Const_long */
2537 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2538 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2539 } /* new_d_Const_type */
2543 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2544 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2548 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2549 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2553 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2555 assert(arg->op == op_Cond);
2556 arg->attr.cond.kind = fragmentary;
2557 arg->attr.cond.default_proj = max_proj;
2558 res = new_Proj(arg, mode_X, max_proj);
2560 } /* new_d_defaultProj */
2563 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2564 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2568 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2569 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2570 } /* new_d_strictConv */
2573 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2574 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2578 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2579 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2588 * Allocate the frag array.
2590 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2591 if (get_opt_precise_exc_context()) {
2592 if ((current_ir_graph->phase_state == phase_building) &&
2593 (get_irn_op(res) == op) && /* Could be optimized away. */
2594 !*frag_store) /* Could be a cse where the arr is already set. */ {
2595 *frag_store = new_frag_arr(res);
2598 } /* allocate_frag_arr */
2601 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2603 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2);
2604 res->attr.except.pin_state = op_pin_state_pinned;
2605 #if PRECISE_EXC_CONTEXT
2606 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2613 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2615 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2);
2616 res->attr.except.pin_state = op_pin_state_pinned;
2617 #if PRECISE_EXC_CONTEXT
2618 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2622 } /* new_d_DivMod */
2625 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2627 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2);
2628 res->attr.except.pin_state = op_pin_state_pinned;
2629 #if PRECISE_EXC_CONTEXT
2630 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2637 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2639 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2);
2640 res->attr.except.pin_state = op_pin_state_pinned;
2641 #if PRECISE_EXC_CONTEXT
2642 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2661 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2662 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2666 new_d_Jmp(dbg_info *db) {
2667 return new_bd_Jmp(db, current_ir_graph->current_block);
2671 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2672 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2676 new_d_Cond(dbg_info *db, ir_node *c) {
2677 return new_bd_Cond(db, current_ir_graph->current_block, c);
2681 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2684 res = new_bd_Call(db, current_ir_graph->current_block,
2685 store, callee, arity, in, tp);
2686 #if PRECISE_EXC_CONTEXT
2687 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2694 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2695 return new_bd_Return(db, current_ir_graph->current_block,
2697 } /* new_d_Return */
2700 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2702 res = new_bd_Load(db, current_ir_graph->current_block,
2704 #if PRECISE_EXC_CONTEXT
2705 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2712 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2714 res = new_bd_Store(db, current_ir_graph->current_block,
2716 #if PRECISE_EXC_CONTEXT
2717 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2724 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2725 where_alloc where) {
2727 res = new_bd_Alloc(db, current_ir_graph->current_block,
2728 store, size, alloc_type, where);
2729 #if PRECISE_EXC_CONTEXT
2730 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2737 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2738 ir_node *size, ir_type *free_type, where_alloc where) {
2739 return new_bd_Free(db, current_ir_graph->current_block,
2740 store, ptr, size, free_type, where);
2744 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2745 /* GL: objptr was called frame before. Frame was a bad choice for the name
2746 as the operand could as well be a pointer to a dynamic object. */
2748 return new_bd_Sel(db, current_ir_graph->current_block,
2749 store, objptr, 0, NULL, ent);
2750 } /* new_d_simpleSel */
2753 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2754 return new_bd_Sel(db, current_ir_graph->current_block,
2755 store, objptr, n_index, index, sel);
2759 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2760 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2762 } /* new_d_SymConst_type */
2765 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2766 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2767 value, kind, firm_unknown_type);
2768 } /* new_d_SymConst */
2771 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2772 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2778 return _new_d_Bad();
2782 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2783 return new_bd_Confirm(db, current_ir_graph->current_block,
2785 } /* new_d_Confirm */
2788 new_d_Unknown(ir_mode *m) {
2789 return new_bd_Unknown(m);
2790 } /* new_d_Unknown */
2793 new_d_CallBegin(dbg_info *db, ir_node *call) {
2794 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2795 } /* new_d_CallBegin */
2798 new_d_EndReg(dbg_info *db) {
2799 return new_bd_EndReg(db, current_ir_graph->current_block);
2800 } /* new_d_EndReg */
2803 new_d_EndExcept(dbg_info *db) {
2804 return new_bd_EndExcept(db, current_ir_graph->current_block);
2805 } /* new_d_EndExcept */
2808 new_d_Break(dbg_info *db) {
2809 return new_bd_Break(db, current_ir_graph->current_block);
2813 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2814 return new_bd_Filter(db, current_ir_graph->current_block,
2816 } /* new_d_Filter */
2819 (new_d_NoMem)(void) {
2820 return _new_d_NoMem();
2824 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2825 ir_node *ir_true, ir_mode *mode) {
2826 return new_bd_Mux(db, current_ir_graph->current_block,
2827 sel, ir_false, ir_true, mode);
2831 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2832 return new_bd_Psi(db, current_ir_graph->current_block,
2833 arity, conds, vals, mode);
2836 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2837 ir_node *dst, ir_node *src, ir_type *data_type) {
2839 res = new_bd_CopyB(db, current_ir_graph->current_block,
2840 store, dst, src, data_type);
2841 #if PRECISE_EXC_CONTEXT
2842 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2848 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2849 return new_bd_InstOf(db, current_ir_graph->current_block,
2850 store, objptr, type);
2851 } /* new_d_InstOf */
2854 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2855 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2858 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2859 ir_node *idx, ir_node *lower, ir_node *upper) {
2861 res = new_bd_Bound(db, current_ir_graph->current_block,
2862 store, idx, lower, upper);
2863 #if PRECISE_EXC_CONTEXT
2864 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2870 new_d_Pin(dbg_info *db, ir_node *node) {
2871 return new_bd_Pin(db, current_ir_graph->current_block, node);
2874 /* ********************************************************************* */
2875 /* Comfortable interface with automatic Phi node construction. */
2876 /* (Uses also constructors of ?? interface, except new_Block. */
2877 /* ********************************************************************* */
2879 /* Block construction */
2880 /* immature Block without predecessors */
2881 ir_node *new_d_immBlock(dbg_info *db) {
2884 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2885 /* creates a new dynamic in-array as length of in is -1 */
2886 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2887 current_ir_graph->current_block = res;
2888 res->attr.block.matured = 0;
2889 res->attr.block.dead = 0;
2890 res->attr.block.irg = current_ir_graph;
2891 res->attr.block.backedge = NULL;
2892 res->attr.block.in_cg = NULL;
2893 res->attr.block.cg_backedge = NULL;
2894 res->attr.block.extblk = NULL;
2895 res->attr.block.region = NULL;
2896 set_Block_block_visited(res, 0);
2898 /* Create and initialize array for Phi-node construction. */
2899 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2900 current_ir_graph->n_loc);
2901 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2903 /* Immature block may not be optimized! */
2904 IRN_VRFY_IRG(res, current_ir_graph);
2907 } /* new_d_immBlock */
2910 new_immBlock(void) {
2911 return new_d_immBlock(NULL);
2912 } /* new_immBlock */
2914 /* add an edge to a jmp/control flow node */
2916 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2917 if (block->attr.block.matured) {
2918 assert(0 && "Error: Block already matured!\n");
2920 int n = ARR_LEN(block->in) - 1;
2921 assert(jmp != NULL);
2922 ARR_APP1(ir_node *, block->in, jmp);
2924 hook_set_irn_n(block, n, jmp, NULL);
2926 } /* add_immBlock_pred */
2928 /* changing the current block */
2930 set_cur_block(ir_node *target) {
2931 current_ir_graph->current_block = target;
2932 } /* set_cur_block */
2934 /* ************************ */
2935 /* parameter administration */
2937 /* get a value from the parameter array from the current block by its index */
2939 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2940 ir_graph *irg = current_ir_graph;
2941 assert(get_irg_phase_state(irg) == phase_building);
2942 inc_irg_visited(irg);
2944 return get_r_value_internal(irg->current_block, pos + 1, mode);
2947 /* get a value from the parameter array from the current block by its index */
2949 get_value(int pos, ir_mode *mode) {
2950 return get_d_value(NULL, pos, mode);
2953 /* set a value at position pos in the parameter array from the current block */
2955 set_value(int pos, ir_node *value) {
2956 ir_graph *irg = current_ir_graph;
2957 assert(get_irg_phase_state(irg) == phase_building);
2958 assert(pos+1 < irg->n_loc);
2959 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2962 /* Find the value number for a node in the current block.*/
2964 find_value(ir_node *value) {
2966 ir_node *bl = current_ir_graph->current_block;
2968 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2969 if (bl->attr.block.graph_arr[i] == value)
2974 /* get the current store */
2977 ir_graph *irg = current_ir_graph;
2979 assert(get_irg_phase_state(irg) == phase_building);
2980 /* GL: one could call get_value instead */
2981 inc_irg_visited(irg);
2982 return get_r_value_internal(irg->current_block, 0, mode_M);
2985 /* set the current store: handles automatic Sync construction for Load nodes */
/* NOTE(review): several lines (else-branches and the in[] assignments) are
   elided in this excerpt; consult the full file before editing. */
2987 set_store(ir_node *store) {
2988 ir_node *load, *pload, *pred, *in[2];
2990 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2991 /* Beware: due to dead code elimination, a store might become a Bad node even in
2992 the construction phase. */
2993 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2995 if (get_opt_auto_create_sync()) {
2996 /* handle non-volatile Load nodes by automatically creating Sync's */
2997 load = skip_Proj(store);
2998 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2999 pred = get_Load_mem(load);
3001 if (is_Sync(pred)) {
3002 /* a Load after a Sync: move it up */
3003 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3005 set_Load_mem(load, get_memop_mem(mem));
3006 add_Sync_pred(pred, store);
3009 pload = skip_Proj(pred);
3010 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3011 /* a Load after a Load: create a new Sync */
3012 set_Load_mem(load, get_Load_mem(pload));
3016 store = new_Sync(2, in);
/* the memory state is kept in slot 0 of the current block's value array */
3021 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3025 keep_alive(ir_node *ka) {
3026 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3029 /* --- Useful access routines --- */
3030 /* Returns the current block of the current graph. To set the current
3031 block use set_cur_block. */
3032 ir_node *get_cur_block(void) {
3033 return get_irg_current_block(current_ir_graph);
3034 } /* get_cur_block */
3036 /* Returns the frame type of the current graph */
3037 ir_type *get_cur_frame_type(void) {
3038 return get_irg_frame_type(current_ir_graph);
3039 } /* get_cur_frame_type */
3042 /* ********************************************************************* */
3045 /* call once for each run of the library */
3047 init_cons(uninitialized_local_variable_func_t *func) {
3048 default_initialize_local_variable = func;
3052 irp_finalize_cons(void) {
3054 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3055 irg_finalize_cons(get_irp_irg(i));
3057 irp->phase_state = phase_high;
3058 } /* irp_finalize_cons */
3061 ir_node *new_Block(int arity, ir_node **in) {
3062 return new_d_Block(NULL, arity, in);
3064 ir_node *new_Start(void) {
3065 return new_d_Start(NULL);
3067 ir_node *new_End(void) {
3068 return new_d_End(NULL);
3070 ir_node *new_Jmp(void) {
3071 return new_d_Jmp(NULL);
3073 ir_node *new_IJmp(ir_node *tgt) {
3074 return new_d_IJmp(NULL, tgt);
3076 ir_node *new_Cond(ir_node *c) {
3077 return new_d_Cond(NULL, c);
3079 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3080 return new_d_Return(NULL, store, arity, in);
3082 ir_node *new_Const(ir_mode *mode, tarval *con) {
3083 return new_d_Const(NULL, mode, con);
3086 ir_node *new_Const_long(ir_mode *mode, long value) {
3087 return new_d_Const_long(NULL, mode, value);
3090 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3091 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3094 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3095 return new_d_SymConst_type(NULL, value, kind, type);
3097 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3098 return new_d_SymConst(NULL, value, kind);
3100 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3101 return new_d_simpleSel(NULL, store, objptr, ent);
3103 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3105 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3107 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3109 return new_d_Call(NULL, store, callee, arity, in, tp);
3111 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3112 return new_d_Add(NULL, op1, op2, mode);
3114 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3115 return new_d_Sub(NULL, op1, op2, mode);
3117 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3118 return new_d_Minus(NULL, op, mode);
3120 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3121 return new_d_Mul(NULL, op1, op2, mode);
3123 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2) {
3124 return new_d_Quot(NULL, memop, op1, op2);
3126 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2) {
3127 return new_d_DivMod(NULL, memop, op1, op2);
3129 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2) {
3130 return new_d_Div(NULL, memop, op1, op2);
3132 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2) {
3133 return new_d_Mod(NULL, memop, op1, op2);
3135 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3136 return new_d_Abs(NULL, op, mode);
3138 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3139 return new_d_And(NULL, op1, op2, mode);
3141 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3142 return new_d_Or(NULL, op1, op2, mode);
3144 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3145 return new_d_Eor(NULL, op1, op2, mode);
3147 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3148 return new_d_Not(NULL, op, mode);
3150 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3151 return new_d_Shl(NULL, op, k, mode);
3153 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3154 return new_d_Shr(NULL, op, k, mode);
3156 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3157 return new_d_Shrs(NULL, op, k, mode);
3159 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3160 return new_d_Rot(NULL, op, k, mode);
3162 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3163 return new_d_Carry(NULL, op1, op2, mode);
3165 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3166 return new_d_Borrow(NULL, op1, op2, mode);
3168 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3169 return new_d_Cmp(NULL, op1, op2);
3171 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3172 return new_d_Conv(NULL, op, mode);
3174 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3175 return new_d_strictConv(NULL, op, mode);
3177 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3178 return new_d_Cast(NULL, op, to_tp);
3180 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3181 return new_d_Phi(NULL, arity, in, mode);
3183 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3184 return new_d_Load(NULL, store, addr, mode);
3186 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3187 return new_d_Store(NULL, store, addr, val);
3189 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3190 where_alloc where) {
3191 return new_d_Alloc(NULL, store, size, alloc_type, where);
3193 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3194 ir_type *free_type, where_alloc where) {
3195 return new_d_Free(NULL, store, ptr, size, free_type, where);
3197 ir_node *new_Sync(int arity, ir_node *in[]) {
3198 return new_d_Sync(NULL, arity, in);
3200 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3201 return new_d_Proj(NULL, arg, mode, proj);
3203 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3204 return new_d_defaultProj(NULL, arg, max_proj);
3206 ir_node *new_Tuple(int arity, ir_node **in) {
3207 return new_d_Tuple(NULL, arity, in);
3209 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3210 return new_d_Id(NULL, val, mode);
3212 ir_node *new_Bad(void) {
3215 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3216 return new_d_Confirm (NULL, val, bound, cmp);
3218 ir_node *new_Unknown(ir_mode *m) {
3219 return new_d_Unknown(m);
3221 ir_node *new_CallBegin(ir_node *callee) {
3222 return new_d_CallBegin(NULL, callee);
3224 ir_node *new_EndReg(void) {
3225 return new_d_EndReg(NULL);
3227 ir_node *new_EndExcept(void) {
3228 return new_d_EndExcept(NULL);
3230 ir_node *new_Break(void) {
3231 return new_d_Break(NULL);
3233 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3234 return new_d_Filter(NULL, arg, mode, proj);
3236 ir_node *new_NoMem(void) {
3237 return new_d_NoMem();
3239 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3240 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3242 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3243 return new_d_Psi(NULL, arity, conds, vals, mode);
3245 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3246 return new_d_CopyB(NULL, store, dst, src, data_type);
3248 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3249 return new_d_InstOf(NULL, store, objptr, ent);
3251 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3252 return new_d_Raise(NULL, store, obj);
3254 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3255 return new_d_Bound(NULL, store, idx, lower, upper);
3257 ir_node *new_Pin(ir_node *node) {
3258 return new_d_Pin(NULL, node);