3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler, Michael Beck
10 * Copyright: (c) 1998-2006 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
29 #include "irgraph_t.h"
33 #include "firm_common_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
/* NOTE(review): this chunk is a sampled extract -- the original file's line
   numbers are fused into the text and many lines are missing; fragments
   below are incomplete as shown. */
45 #if USE_EXPLICIT_PHI_IN_STACK
46 /* A stack needed for the automatic Phi node construction in constructor
47 Phi_in. Redefinition in irgraph.c!! */
52 typedef struct Phi_in_stack Phi_in_stack;
55 /* when we need verifying */
/* Two alternative expansions of IRN_VRFY_IRG: a no-op and a call to
   irn_vrfy_irg() -- presumably selected by an #ifdef (e.g. NDEBUG) that is
   not visible in this extract; TODO confirm against the full file. */
57 # define IRN_VRFY_IRG(res, irg)
59 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
63 * Language dependent variable initialization callback.
/* Front-end hook invoked for a local variable that is read before it is
   written; NULL means no language-specific initialization. */
65 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Generator macros for the repetitive constructor families.  Naming
   convention visible in this file:
     new_bd_*  -- take an explicit block, build in current_ir_graph
     new_rd_*  -- additionally take the ir_graph; they save/restore
                  current_ir_graph around a new_bd_* call
     new_d_*   -- build in current_ir_graph->current_block
   NOTE(review): several continuation lines (result declarations, in[]
   setup, braces, returns) are missing from this extract. */
67 /* creates a bd constructor for a binop */
68 #define NEW_BD_BINOP(instr) \
70 new_bd_##instr(dbg_info *db, ir_node *block, \
71 ir_node *op1, ir_node *op2, ir_mode *mode) \
75 ir_graph *irg = current_ir_graph; \
78 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
79 res = optimize_node(res); \
80 IRN_VRFY_IRG(res, irg); \
84 /* creates a bd constructor for an unop */
85 #define NEW_BD_UNOP(instr) \
87 new_bd_##instr(dbg_info *db, ir_node *block, \
88 ir_node *op, ir_mode *mode) \
91 ir_graph *irg = current_ir_graph; \
92 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
93 res = optimize_node(res); \
94 IRN_VRFY_IRG(res, irg); \
/* Div-like ops take a memory operand and produce a mode_T tuple
   (memory result + data result + exception projections). */
98 /* creates a bd constructor for an divop */
99 #define NEW_BD_DIVOP(instr) \
101 new_bd_##instr(dbg_info *db, ir_node *block, \
102 ir_node *memop, ir_node *op1, ir_node *op2) \
106 ir_graph *irg = current_ir_graph; \
110 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
111 res = optimize_node(res); \
112 IRN_VRFY_IRG(res, irg); \
116 /* creates a rd constructor for a binop */
117 #define NEW_RD_BINOP(instr) \
119 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
120 ir_node *op1, ir_node *op2, ir_mode *mode) \
123 ir_graph *rem = current_ir_graph; \
124 current_ir_graph = irg; \
125 res = new_bd_##instr(db, block, op1, op2, mode); \
126 current_ir_graph = rem; \
130 /* creates a rd constructor for an unop */
131 #define NEW_RD_UNOP(instr) \
133 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
134 ir_node *op, ir_mode *mode) \
137 ir_graph *rem = current_ir_graph; \
138 current_ir_graph = irg; \
139 res = new_bd_##instr(db, block, op, mode); \
140 current_ir_graph = rem; \
144 /* creates a rd constructor for an divop */
145 #define NEW_RD_DIVOP(instr) \
147 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
148 ir_node *memop, ir_node *op1, ir_node *op2) \
151 ir_graph *rem = current_ir_graph; \
152 current_ir_graph = irg; \
153 res = new_bd_##instr(db, block, memop, op1, op2); \
154 current_ir_graph = rem; \
158 /* creates a d constructor for an binop */
159 #define NEW_D_BINOP(instr) \
161 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
162 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
165 /* creates a d constructor for an unop */
166 #define NEW_D_UNOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
174 * Constructs a Block with a fixed number of predecessors.
175 * Does not set current_block. Can not be used with automatic
176 * Phi node construction.
179 new_bd_Block(dbg_info *db, int arity, ir_node **in)
182 ir_graph *irg = current_ir_graph;
/* The block has no enclosing block, hence the NULL block argument. */
184 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are known up front, so the block is born mature. */
185 set_Block_matured(res, 1);
186 set_Block_block_visited(res, 0);
188 /* res->attr.block.exc = exc_normal; */
189 /* res->attr.block.handler_entry = 0; */
190 res->attr.block.dead = 0;
191 res->attr.block.irg = irg;
192 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
193 res->attr.block.in_cg = NULL;
194 res->attr.block.cg_backedge = NULL;
195 res->attr.block.extblk = NULL;
197 IRN_VRFY_IRG(res, irg);
/* Start node: 0 predecessors, produces the mode_T start tuple. */
202 new_bd_Start(dbg_info *db, ir_node *block)
205 ir_graph *irg = current_ir_graph;
207 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
208 /* res->attr.start.irg = irg; */
210 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 creates a dynamic in-array (keepalives added later). */
215 new_bd_End(dbg_info *db, ir_node *block)
218 ir_graph *irg = current_ir_graph;
220 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
222 IRN_VRFY_IRG(res, irg);
227 * Creates a Phi node with all predecessors. Calling this constructor
228 * is only allowed if the corresponding block is mature.
231 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
234 ir_graph *irg = current_ir_graph;
238 /* Don't assert that block matured: the use of this constructor is strongly
240 if ( get_Block_matured(block) )
241 assert( get_irn_arity(block) == arity );
243 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
245 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan inputs for Unknown nodes: such Phis must not be optimized away
   (elided lines presumably set a has_unknown flag here). */
247 for (i = arity-1; i >= 0; i--)
248 if (get_irn_op(in[i]) == op_Unknown) {
253 if (!has_unknown) res = optimize_node (res);
254 IRN_VRFY_IRG(res, irg);
256 /* Memory Phis in endless loops must be kept alive.
257 As we can't distinguish these easily we keep all of them alive. */
258 if ((res->op == op_Phi) && (mode == mode_M))
259 add_End_keepalive(get_irg_end(irg), res);
/* Const nodes are always placed in the start block, ignoring `block`. */
264 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
267 ir_graph *irg = current_ir_graph;
269 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
270 res->attr.con.tv = con;
271 set_Const_type(res, tp); /* Call method because of complex assertion. */
272 res = optimize_node (res);
273 assert(get_Const_type(res) == tp);
274 IRN_VRFY_IRG(res, irg);
277 } /* new_bd_Const_type */
/* NOTE(review): delegates to the rd variant (not new_bd_Const_type) --
   layering quirk, but behavior-equivalent since irg == current_ir_graph. */
280 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
282 ir_graph *irg = current_ir_graph;
284 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
288 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value)
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
293 } /* new_bd_Const_long */
/* Id node: a transparent copy of `val`; optimize_node usually folds it. */
296 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
299 ir_graph *irg = current_ir_graph;
301 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
/* Proj: selects result `proj` from the mode_T tuple `arg`. */
308 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
315 res->attr.proj = proj;
318 assert(get_Proj_pred(res));
319 assert(get_nodes_block(get_Proj_pred(res)));
321 res = optimize_node(res);
323 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond; NOTE: mutates the Cond's attributes
   (kind -> fragmentary, default_proj -> max_proj) as a side effect. */
329 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
346 ir_graph *irg = current_ir_graph;
348 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
349 res->attr.conv.strict = strict_flag;
350 res = optimize_node(res);
351 IRN_VRFY_IRG(res, irg);
/* Cast changes only the type attribute; the mode of `op` is preserved. */
356 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in)
374 ir_graph *irg = current_ir_graph;
376 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
377 res = optimize_node (res);
378 IRN_VRFY_IRG(res, irg);
/* Cmp produces a mode_T tuple; the individual relations are Projs. */
403 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
407 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_Jmp(dbg_info *db, ir_node *block)
420 ir_graph *irg = current_ir_graph;
422 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
423 res = optimize_node (res);
424 IRN_VRFY_IRG (res, irg);
/* Indirect jump to computed target `tgt`. */
429 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt)
432 ir_graph *irg = current_ir_graph;
434 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
435 res = optimize_node (res);
436 IRN_VRFY_IRG (res, irg);
/* optimize_node may have folded the IJmp to something else (elided
   lines presumably mark the block as containing a keepalive/exit). */
438 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
444 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c)
447 ir_graph *irg = current_ir_graph;
449 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
450 res->attr.cond.kind = dense;
451 res->attr.cond.default_proj = 0;
452 res->attr.cond.pred = COND_JMP_PRED_NONE;
453 res = optimize_node (res);
454 IRN_VRFY_IRG(res, irg);
/* Call: in-array layout is [store, callee, arg0..argN-1], hence the
   memcpy into &r_in[2]. */
459 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
460 ir_node *callee, int arity, ir_node **in, ir_type *tp)
465 ir_graph *irg = current_ir_graph;
468 NEW_ARR_A(ir_node *, r_in, r_arity);
471 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
473 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
475 assert((get_unknown_type() == tp) || is_Method_type(tp));
476 set_Call_type(res, tp);
477 res->attr.call.exc.pin_state = op_pin_state_pinned;
478 res->attr.call.callee_arr = NULL;
479 res = optimize_node(res);
480 IRN_VRFY_IRG(res, irg);
/* Return: in-array layout is [store, res0..resN-1]. */
485 new_bd_Return(dbg_info *db, ir_node *block,
486 ir_node *store, int arity, ir_node **in)
491 ir_graph *irg = current_ir_graph;
494 NEW_ARR_A (ir_node *, r_in, r_arity);
496 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
497 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
498 res = optimize_node(res);
499 IRN_VRFY_IRG(res, irg);
/* Load: [store, adr]; the loaded mode is an attribute, the node is mode_T. */
504 new_bd_Load(dbg_info *db, ir_node *block,
505 ir_node *store, ir_node *adr, ir_mode *mode)
509 ir_graph *irg = current_ir_graph;
513 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
514 res->attr.load.exc.pin_state = op_pin_state_pinned;
515 res->attr.load.load_mode = mode;
516 res->attr.load.volatility = volatility_non_volatile;
517 res = optimize_node(res);
518 IRN_VRFY_IRG(res, irg);
/* Store: [store, adr, val]. */
523 new_bd_Store(dbg_info *db, ir_node *block,
524 ir_node *store, ir_node *adr, ir_node *val)
528 ir_graph *irg = current_ir_graph;
533 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
534 res->attr.store.exc.pin_state = op_pin_state_pinned;
535 res->attr.store.volatility = volatility_non_volatile;
536 res = optimize_node(res);
537 IRN_VRFY_IRG(res, irg);
/* Alloc: [store, size]; `where` selects stack vs. heap allocation. */
542 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
543 ir_node *size, ir_type *alloc_type, where_alloc where)
547 ir_graph *irg = current_ir_graph;
551 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
552 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
553 res->attr.alloc.where = where;
554 res->attr.alloc.type = alloc_type;
555 res = optimize_node(res);
556 IRN_VRFY_IRG(res, irg);
/* Free: [store, ptr, size]; yields only memory (mode_M). */
561 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
562 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
566 ir_graph *irg = current_ir_graph;
571 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
572 res->attr.free.where = where;
573 res->attr.free.type = free_type;
574 res = optimize_node(res);
575 IRN_VRFY_IRG(res, irg);
/* Sel: [store, objptr, index0..indexN-1] selecting entity `ent`. */
580 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
581 int arity, ir_node **in, ir_entity *ent)
586 ir_graph *irg = current_ir_graph;
588 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
591 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
594 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
596 * FIXM: Sel's can select functions which should be of mode mode_P_code.
598 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
599 res->attr.sel.ent = ent;
600 res = optimize_node(res);
601 IRN_VRFY_IRG(res, irg);
/* SymConst: mode depends on the kind -- addresses are pointer-moded,
   other kinds (size/align/tag) presumably use an integer mode set in
   elided lines. Placed in `block` as given. */
606 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
607 symconst_kind symkind, ir_type *tp) {
610 ir_graph *irg = current_ir_graph;
612 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
613 mode = mode_P_data; /* FIXME: can be mode_P_code */
617 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
619 res->attr.symc.num = symkind;
620 res->attr.symc.sym = value;
621 res->attr.symc.tp = tp;
623 res = optimize_node(res);
624 IRN_VRFY_IRG(res, irg);
626 } /* new_bd_SymConst_type */
/* Sync: dynamic in-array (arity -1); predecessors are added afterwards. */
629 new_bd_Sync(dbg_info *db, ir_node *block)
632 ir_graph *irg = current_ir_graph;
634 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
635 /* no need to call optimize node here, Sync are always created with no predecessors */
636 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts `val cmp bound` holds on this path; same mode as val. */
641 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
643 ir_node *in[2], *res;
644 ir_graph *irg = current_ir_graph;
648 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
649 res->attr.confirm_cmp = cmp;
650 res = optimize_node (res);
651 IRN_VRFY_IRG(res, irg);
655 /* this function is often called with current_ir_graph unset */
/* Unknown: placed in the start block, no debug info. */
657 new_bd_Unknown(ir_mode *m)
660 ir_graph *irg = current_ir_graph;
662 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
663 res = optimize_node(res);
665 } /* new_bd_Unknown */
/* Interprocedural-view nodes: CallBegin/EndReg/EndExcept/Break/Filter. */
/* CallBegin: marks the start of an interprocedurally-viewed call;
   its single input is the callee pointer of `call`. */
668 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call)
672 ir_graph *irg = current_ir_graph;
674 in[0] = get_Call_ptr(call);
675 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
676 /* res->attr.callbegin.irg = irg; */
677 res->attr.callbegin.call = call;
678 res = optimize_node(res);
679 IRN_VRFY_IRG(res, irg);
681 } /* new_bd_CallBegin */
/* EndReg: dynamic arity; also registered on the graph. */
684 new_bd_EndReg(dbg_info *db, ir_node *block)
687 ir_graph *irg = current_ir_graph;
689 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
690 set_irg_end_reg(irg, res);
691 IRN_VRFY_IRG(res, irg);
693 } /* new_bd_EndReg */
696 new_bd_EndExcept(dbg_info *db, ir_node *block)
699 ir_graph *irg = current_ir_graph;
701 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
702 set_irg_end_except(irg, res);
703 IRN_VRFY_IRG (res, irg);
705 } /* new_bd_EndExcept */
708 new_bd_Break(dbg_info *db, ir_node *block)
711 ir_graph *irg = current_ir_graph;
713 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
714 res = optimize_node(res);
715 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj (selects `proj` from `arg`). */
720 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
724 ir_graph *irg = current_ir_graph;
726 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
727 res->attr.filter.proj = proj;
728 res->attr.filter.in_cg = NULL;
729 res->attr.filter.backedge = NULL;
732 assert(get_Proj_pred(res));
733 assert(get_nodes_block(get_Proj_pred(res)));
735 res = optimize_node(res);
736 IRN_VRFY_IRG(res, irg);
738 } /* new_bd_Filter */
/* Mux: in-array presumably [sel, ir_false, ir_true] (setup lines elided). */
741 new_bd_Mux(dbg_info *db, ir_node *block,
742 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
746 ir_graph *irg = current_ir_graph;
752 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
/* Psi: generalized Mux; in-array interleaves cond/val pairs with a
   trailing default value, hence 2*arity+1 inputs. */
761 new_bd_Psi(dbg_info *db, ir_node *block,
762 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
766 ir_graph *irg = current_ir_graph;
769 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
771 for (i = 0; i < arity; ++i) {
773 in[2 * i + 1] = vals[i];
777 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
780 res = optimize_node(res);
781 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of `data_type` from src to dst; [store, dst, src]. */
786 new_bd_CopyB(dbg_info *db, ir_node *block,
787 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
791 ir_graph *irg = current_ir_graph;
797 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
799 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
800 res->attr.copyb.data_type = data_type;
801 res = optimize_node(res);
802 IRN_VRFY_IRG(res, irg);
807 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
808 ir_node *objptr, ir_type *type)
812 ir_graph *irg = current_ir_graph;
816 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
817 res->attr.instof.type = type;
818 res = optimize_node(res);
819 IRN_VRFY_IRG(res, irg);
821 } /* new_bd_InstOf */
/* Raise: throws exception object `obj`; inputs presumably [store, obj]. */
824 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
828 ir_graph *irg = current_ir_graph;
832 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
833 res = optimize_node(res);
834 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; inputs [store, idx, lower, upper]. */
839 new_bd_Bound(dbg_info *db, ir_node *block,
840 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
844 ir_graph *irg = current_ir_graph;
850 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
851 res->attr.bound.exc.pin_state = op_pin_state_pinned;
852 res = optimize_node(res);
853 IRN_VRFY_IRG(res, irg);
/* Pin: pins `node` to this block; result has the same mode as `node`. */
858 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node)
861 ir_graph *irg = current_ir_graph;
863 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
864 res = optimize_node(res);
865 IRN_VRFY_IRG(res, irg);
869 /* --------------------------------------------- */
870 /* private interfaces, for professional use only */
871 /* --------------------------------------------- */
873 /* Constructs a Block with a fixed number of predecessors.
874 Does not set current_block. Can not be used with automatic
875 Phi node construction. */
/* rd-constructors: public variants with an explicit graph argument.
   They temporarily switch current_ir_graph to `irg` around a bd call. */
877 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in)
879 ir_graph *rem = current_ir_graph;
882 current_ir_graph = irg;
883 res = new_bd_Block(db, arity, in);
884 current_ir_graph = rem;
890 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
892 ir_graph *rem = current_ir_graph;
895 current_ir_graph = irg;
896 res = new_bd_Start(db, block);
897 current_ir_graph = rem;
903 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
906 ir_graph *rem = current_ir_graph;
908 current_ir_graph = rem;
909 res = new_bd_End(db, block);
910 current_ir_graph = rem;
915 /* Creates a Phi node with all predecessors. Calling this constructor
916 is only allowed if the corresponding block is mature. */
918 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
921 ir_graph *rem = current_ir_graph;
923 current_ir_graph = irg;
924 res = new_bd_Phi(db, block,arity, in, mode);
925 current_ir_graph = rem;
931 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
934 ir_graph *rem = current_ir_graph;
936 current_ir_graph = irg;
937 res = new_bd_Const_type(db, block, mode, con, tp);
938 current_ir_graph = rem;
941 } /* new_rd_Const_type */
/* Const without explicit type: delegates with firm_unknown_type. */
944 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
947 ir_graph *rem = current_ir_graph;
949 current_ir_graph = irg;
950 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
951 current_ir_graph = rem;
957 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
959 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
960 } /* new_rd_Const_long */
963 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Id(db, block, val, mode);
970 current_ir_graph = rem;
976 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
980 ir_graph *rem = current_ir_graph;
982 current_ir_graph = irg;
983 res = new_bd_Proj(db, block, arg, mode, proj);
984 current_ir_graph = rem;
990 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
994 ir_graph *rem = current_ir_graph;
996 current_ir_graph = irg;
997 res = new_bd_defaultProj(db, block, arg, max_proj);
998 current_ir_graph = rem;
1001 } /* new_rd_defaultProj */
/* Non-strict Conv (strict_flag 0). */
1004 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1007 ir_graph *rem = current_ir_graph;
1009 current_ir_graph = irg;
1010 res = new_bd_Conv(db, block, op, mode, 0);
1011 current_ir_graph = rem;
1017 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1020 ir_graph *rem = current_ir_graph;
1022 current_ir_graph = irg;
1023 res = new_bd_Cast(db, block, op, to_tp);
1024 current_ir_graph = rem;
1030 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1033 ir_graph *rem = current_ir_graph;
1035 current_ir_graph = irg;
1036 res = new_bd_Tuple(db, block, arity, in);
1037 current_ir_graph = rem;
1040 } /* new_rd_Tuple */
/* Macro-generated rd constructors (siblings elided from this extract). */
1047 NEW_RD_DIVOP(DivMod)
1060 NEW_RD_BINOP(Borrow)
1063 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1064 ir_node *op1, ir_node *op2)
1067 ir_graph *rem = current_ir_graph;
1069 current_ir_graph = irg;
1070 res = new_bd_Cmp(db, block, op1, op2);
1071 current_ir_graph = rem;
1077 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block)
1080 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_Jmp(db, block);
1084 current_ir_graph = rem;
1090 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1093 ir_graph *rem = current_ir_graph;
1095 current_ir_graph = irg;
1096 res = new_bd_IJmp(db, block, tgt);
1097 current_ir_graph = rem;
1103 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1106 ir_graph *rem = current_ir_graph;
1108 current_ir_graph = irg;
1109 res = new_bd_Cond(db, block, c);
1110 current_ir_graph = rem;
1116 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1117 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1120 ir_graph *rem = current_ir_graph;
1122 current_ir_graph = irg;
1123 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1124 current_ir_graph = rem;
1130 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1131 ir_node *store, int arity, ir_node **in)
1134 ir_graph *rem = current_ir_graph;
1136 current_ir_graph = irg;
1137 res = new_bd_Return(db, block, store, arity, in);
1138 current_ir_graph = rem;
1141 } /* new_rd_Return */
1144 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1145 ir_node *store, ir_node *adr, ir_mode *mode)
1148 ir_graph *rem = current_ir_graph;
1150 current_ir_graph = irg;
1151 res = new_bd_Load(db, block, store, adr, mode);
1152 current_ir_graph = rem;
1158 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1159 ir_node *store, ir_node *adr, ir_node *val)
1162 ir_graph *rem = current_ir_graph;
1164 current_ir_graph = irg;
1165 res = new_bd_Store(db, block, store, adr, val);
1166 current_ir_graph = rem;
1169 } /* new_rd_Store */
1172 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1173 ir_node *size, ir_type *alloc_type, where_alloc where)
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1180 current_ir_graph = rem;
1183 } /* new_rd_Alloc */
1186 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1187 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1190 ir_graph *rem = current_ir_graph;
1192 current_ir_graph = irg;
1193 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1194 current_ir_graph = rem;
/* simpleSel: Sel with no index inputs (arity 0). */
1200 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1201 ir_node *store, ir_node *objptr, ir_entity *ent)
1204 ir_graph *rem = current_ir_graph;
1206 current_ir_graph = irg;
1207 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1208 current_ir_graph = rem;
1211 } /* new_rd_simpleSel */
1214 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1215 int arity, ir_node **in, ir_entity *ent)
1218 ir_graph *rem = current_ir_graph;
1220 current_ir_graph = irg;
1221 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1222 current_ir_graph = rem;
1228 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1229 symconst_kind symkind, ir_type *tp)
1232 ir_graph *rem = current_ir_graph;
1234 current_ir_graph = irg;
1235 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1236 current_ir_graph = rem;
1239 } /* new_rd_SymConst_type */
/* SymConst without explicit type: uses firm_unknown_type. */
1242 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1243 symconst_kind symkind)
1245 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1247 } /* new_rd_SymConst */
/* Convenience SymConst constructors.  All place the node in the start
   block of irg and differ only in which union member of symconst_symbol
   they fill and which symconst_kind they pass. */
1249 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp)
1251 symconst_symbol sym;
1252 sym.entity_p = symbol;
1253 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1254 } /* new_rd_SymConst_addr_ent */
1256 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp)
1258 symconst_symbol sym;
1259 sym.entity_p = symbol;
1260 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1261 } /* new_rd_SymConst_ofs_ent */
1263 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1264 symconst_symbol sym;
1265 sym.ident_p = symbol;
1266 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1267 } /* new_rd_SymConst_addr_name */
1269 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1270 symconst_symbol sym;
1271 sym.type_p = symbol;
1272 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1273 } /* new_rd_SymConst_type_tag */
1275 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1276 symconst_symbol sym;
1277 sym.type_p = symbol;
1278 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1279 } /* new_rd_SymConst_size */
1281 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1282 symconst_symbol sym;
1283 sym.type_p = symbol;
1284 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1285 } /* new_rd_SymConst_align */
/* Sync: built empty, then the arity predecessors are appended. */
1288 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1291 ir_graph *rem = current_ir_graph;
1294 current_ir_graph = irg;
1295 res = new_bd_Sync(db, block);
1296 current_ir_graph = rem;
1298 for (i = 0; i < arity; ++i)
1299 add_Sync_pred(res, in[i]);
/* Bad is a singleton per graph -- just return the cached node. */
1305 new_rd_Bad(ir_graph *irg) {
1306 return get_irg_bad(irg);
1310 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1313 ir_graph *rem = current_ir_graph;
1315 current_ir_graph = irg;
1316 res = new_bd_Confirm(db, block, val, bound, cmp);
1317 current_ir_graph = rem;
1320 } /* new_rd_Confirm */
1322 /* this function is often called with current_ir_graph unset */
1324 new_rd_Unknown(ir_graph *irg, ir_mode *m)
1327 ir_graph *rem = current_ir_graph;
1329 current_ir_graph = irg;
1330 res = new_bd_Unknown(m);
1331 current_ir_graph = rem;
1334 } /* new_rd_Unknown */
1337 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1340 ir_graph *rem = current_ir_graph;
1342 current_ir_graph = irg;
1343 res = new_bd_CallBegin(db, block, call);
1344 current_ir_graph = rem;
1347 } /* new_rd_CallBegin */
/* NOTE(review): unlike most rd constructors, EndReg/EndExcept build the
   node directly with the explicit irg instead of delegating to the bd
   variant -- no current_ir_graph switch needed. */
1350 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
1354 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1355 set_irg_end_reg(irg, res);
1356 IRN_VRFY_IRG(res, irg);
1358 } /* new_rd_EndReg */
1361 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
1365 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1366 set_irg_end_except(irg, res);
1367 IRN_VRFY_IRG (res, irg);
1369 } /* new_rd_EndExcept */
1372 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block)
1375 ir_graph *rem = current_ir_graph;
1377 current_ir_graph = irg;
1378 res = new_bd_Break(db, block);
1379 current_ir_graph = rem;
1382 } /* new_rd_Break */
1385 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_Filter(db, block, arg, mode, proj);
1393 current_ir_graph = rem;
1396 } /* new_rd_Filter */
/* NoMem is a singleton per graph -- return the cached node. */
1399 new_rd_NoMem(ir_graph *irg) {
1400 return get_irg_no_mem(irg);
1401 } /* new_rd_NoMem */
1404 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1405 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1408 ir_graph *rem = current_ir_graph;
1410 current_ir_graph = irg;
1411 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1412 current_ir_graph = rem;
1418 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1419 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1426 current_ir_graph = rem;
1431 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1432 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1435 ir_graph *rem = current_ir_graph;
1437 current_ir_graph = irg;
1438 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1439 current_ir_graph = rem;
1442 } /* new_rd_CopyB */
1445 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1446 ir_node *objptr, ir_type *type)
1449 ir_graph *rem = current_ir_graph;
1451 current_ir_graph = irg;
1452 res = new_bd_InstOf(db, block, store, objptr, type);
1453 current_ir_graph = rem;
1456 } /* new_rd_InstOf */
1459 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1462 ir_graph *rem = current_ir_graph;
1464 current_ir_graph = irg;
1465 res = new_bd_Raise(db, block, store, obj);
1466 current_ir_graph = rem;
1469 } /* new_rd_Raise */
1471 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1472 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1475 ir_graph *rem = current_ir_graph;
1477 current_ir_graph = irg;
1478 res = new_bd_Bound(db, block, store, idx, lower, upper);
1479 current_ir_graph = rem;
1482 } /* new_rd_Bound */
1484 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node)
1487 ir_graph *rem = current_ir_graph;
1489 current_ir_graph = irg;
1490 res = new_bd_Pin(db, block, node);
1491 current_ir_graph = rem;
/* r-constructors: convenience wrappers that forward to the rd variants
   with a NULL dbg_info. */
1496 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1497 return new_rd_Block(NULL, irg, arity, in);
1499 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1500 return new_rd_Start(NULL, irg, block);
1502 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1503 return new_rd_End(NULL, irg, block);
1505 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1506 return new_rd_Jmp(NULL, irg, block);
1508 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1509 return new_rd_IJmp(NULL, irg, block, tgt);
1511 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1512 return new_rd_Cond(NULL, irg, block, c);
1514 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1515 ir_node *store, int arity, ir_node **in) {
1516 return new_rd_Return(NULL, irg, block, store, arity, in);
1518 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1519 ir_mode *mode, tarval *con) {
1520 return new_rd_Const(NULL, irg, block, mode, con);
1522 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1523 ir_mode *mode, long value) {
1524 return new_rd_Const_long(NULL, irg, block, mode, value);
1526 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1527 ir_mode *mode, tarval *con, ir_type *tp) {
1528 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1530 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1531 symconst_symbol value, symconst_kind symkind) {
1532 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* NOTE(review): goes through new_rd_Sel with arity 0 rather than
   new_rd_simpleSel -- equivalent, since simpleSel does the same. */
1534 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1535 ir_node *objptr, ir_entity *ent) {
1536 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1538 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1539 ir_node *objptr, int n_index, ir_node **index,
1541 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1543 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1544 ir_node *callee, int arity, ir_node **in,
1546 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1548 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1549 ir_node *op1, ir_node *op2, ir_mode *mode) {
1550 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1552 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1553 ir_node *op1, ir_node *op2, ir_mode *mode) {
1554 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1556 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1557 ir_node *op, ir_mode *mode) {
1558 return new_rd_Minus(NULL, irg, block, op, mode);
1560 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1561 ir_node *op1, ir_node *op2, ir_mode *mode) {
1562 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1564 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1565 ir_node *memop, ir_node *op1, ir_node *op2) {
1566 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1568 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1569 ir_node *memop, ir_node *op1, ir_node *op2) {
1570 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1572 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1573 ir_node *memop, ir_node *op1, ir_node *op2) {
1574 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1576 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1577 ir_node *memop, ir_node *op1, ir_node *op2) {
1578 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1580 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1581 ir_node *op, ir_mode *mode) {
1582 return new_rd_Abs(NULL, irg, block, op, mode);
1584 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1585 ir_node *op1, ir_node *op2, ir_mode *mode) {
1586 return new_rd_And(NULL, irg, block, op1, op2, mode);
1588 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1589 ir_node *op1, ir_node *op2, ir_mode *mode) {
1590 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1592 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1593 ir_node *op1, ir_node *op2, ir_mode *mode) {
1594 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1596 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1597 ir_node *op, ir_mode *mode) {
1598 return new_rd_Not(NULL, irg, block, op, mode);
1600 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1601 ir_node *op, ir_node *k, ir_mode *mode) {
1602 return new_rd_Shl(NULL, irg, block, op, k, mode);
1604 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1605 ir_node *op, ir_node *k, ir_mode *mode) {
1606 return new_rd_Shr(NULL, irg, block, op, k, mode);
1608 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1609 ir_node *op, ir_node *k, ir_mode *mode) {
1610 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1612 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1613 ir_node *op, ir_node *k, ir_mode *mode) {
1614 return new_rd_Rot(NULL, irg, block, op, k, mode);
1616 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1617 ir_node *op, ir_node *k, ir_mode *mode) {
1618 return new_rd_Carry(NULL, irg, block, op, k, mode);
1620 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1621 ir_node *op, ir_node *k, ir_mode *mode) {
1622 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1624 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1625 ir_node *op1, ir_node *op2) {
1626 return new_rd_Cmp(NULL, irg, block, op1, op2);
1628 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1629 ir_node *op, ir_mode *mode) {
1630 return new_rd_Conv(NULL, irg, block, op, mode);
1632 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1633 return new_rd_Cast(NULL, irg, block, op, to_tp);
1635 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1636 ir_node **in, ir_mode *mode) {
1637 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1639 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1640 ir_node *store, ir_node *adr, ir_mode *mode) {
1641 return new_rd_Load(NULL, irg, block, store, adr, mode);
1643 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1644 ir_node *store, ir_node *adr, ir_node *val) {
1645 return new_rd_Store(NULL, irg, block, store, adr, val);
1647 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1648 ir_node *size, ir_type *alloc_type, where_alloc where) {
1649 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1651 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1652 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1653 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1655 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1656 return new_rd_Sync(NULL, irg, block, arity, in);
1658 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1659 ir_mode *mode, long proj) {
1660 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1662 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1664 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1666 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1667 int arity, ir_node **in) {
1668 return new_rd_Tuple(NULL, irg, block, arity, in );
1670 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1671 ir_node *val, ir_mode *mode) {
1672 return new_rd_Id(NULL, irg, block, val, mode);
1674 ir_node *new_r_Bad (ir_graph *irg) {
1675 return new_rd_Bad(irg);
1677 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1678 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1680 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1681 return new_rd_Unknown(irg, m);
1683 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1684 return new_rd_CallBegin(NULL, irg, block, callee);
1686 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1687 return new_rd_EndReg(NULL, irg, block);
1689 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1690 return new_rd_EndExcept(NULL, irg, block);
1692 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1693 return new_rd_Break(NULL, irg, block);
1695 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1696 ir_mode *mode, long proj) {
1697 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1699 ir_node *new_r_NoMem (ir_graph *irg) {
1700 return new_rd_NoMem(irg);
1702 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1703 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1704 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1706 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1707 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1708 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1710 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1711 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1712 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1714 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1716 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1718 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1719 ir_node *store, ir_node *obj) {
1720 return new_rd_Raise(NULL, irg, block, store, obj);
1722 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1723 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1724 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1726 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1727 return new_rd_Pin(NULL, irg, block, node);
1730 /** ********************/
1731 /** public interfaces */
1732 /** construction tools */
1736 * - create a new Start node in the current block
1738 * @return s - pointer to the created Start node
1743 new_d_Start(dbg_info *db)
1747 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1748 op_Start, mode_T, 0, NULL);
1749 /* res->attr.start.irg = current_ir_graph; */
1751 res = optimize_node(res);
1752 IRN_VRFY_IRG(res, current_ir_graph);
1757 new_d_End(dbg_info *db)
1760 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1761 op_End, mode_X, -1, NULL);
1762 res = optimize_node(res);
1763 IRN_VRFY_IRG(res, current_ir_graph);
1768 /* Constructs a Block with a fixed number of predecessors.
1769 Does set current_block. Can be used with automatic Phi
1770 node construction. */
1772 new_d_Block(dbg_info *db, int arity, ir_node **in)
1776 int has_unknown = 0;
1778 res = new_bd_Block(db, arity, in);
1780 /* Create and initialize array for Phi-node construction. */
1781 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1782 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1783 current_ir_graph->n_loc);
1784 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1787 for (i = arity-1; i >= 0; i--)
1788 if (get_irn_op(in[i]) == op_Unknown) {
1793 if (!has_unknown) res = optimize_node(res);
1794 current_ir_graph->current_block = res;
1796 IRN_VRFY_IRG(res, current_ir_graph);
1801 /* ***********************************************************************/
1802 /* Methods necessary for automatic Phi node creation */
1804 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1805 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1806 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1807 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1809 Call Graph: ( A ---> B == A "calls" B)
1811 get_value mature_immBlock
1819 get_r_value_internal |
1823 new_rd_Phi0 new_rd_Phi_in
1825 * *************************************************************************** */
1827 /** Creates a Phi node with 0 predecessors. */
1828 static INLINE ir_node *
1829 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
1833 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1834 IRN_VRFY_IRG(res, irg);
1838 /* There are two implementations of the Phi node construction. The first
1839 is faster, but does not work for blocks with more than 2 predecessors.
1840 The second works always but is slower and causes more unnecessary Phi
1842 Select the implementations by the following preprocessor flag set in
1844 #if USE_FAST_PHI_CONSTRUCTION
1846 /* This is a stack used for allocating and deallocating nodes in
1847 new_rd_Phi_in. The original implementation used the obstack
1848 to model this stack, now it is explicit. This reduces side effects.
1850 #if USE_EXPLICIT_PHI_IN_STACK
1852 new_Phi_in_stack(void) {
1855 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1857 res->stack = NEW_ARR_F (ir_node *, 0);
1861 } /* new_Phi_in_stack */
/** Destroys a Phi-in stack: releases the flexible array holding the
    recycled Phi nodes. (Presumably also free()s the stack struct
    itself on a line missing from this excerpt — confirm against the
    full source.) */
1864 free_Phi_in_stack(Phi_in_stack *s) {
1865 DEL_ARR_F(s->stack);
1867 } /* free_Phi_in_stack */
/** Returns a Phi node to the free pool implemented by the current
    graph's Phi_in_stack: if the write cursor (pos) is at the end of
    the array, the node is appended; otherwise an existing slot at pos
    is overwritten. The cursor is advanced in either case.
    NOTE(review): the 'else' branch separating the two stores appears
    to be on a line missing from this excerpt. */
1870 free_to_Phi_in_stack(ir_node *phi) {
1871 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1872 current_ir_graph->Phi_in_stack->pos)
1873 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1875 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1877 (current_ir_graph->Phi_in_stack->pos)++;
1878 } /* free_to_Phi_in_stack */
1880 static INLINE ir_node *
1881 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1882 int arity, ir_node **in) {
1884 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1885 int pos = current_ir_graph->Phi_in_stack->pos;
1889 /* We need to allocate a new node */
1890 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1891 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1893 /* reuse the old node and initialize it again. */
1896 assert (res->kind == k_ir_node);
1897 assert (res->op == op_Phi);
1901 assert (arity >= 0);
1902 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1903 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1905 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1907 (current_ir_graph->Phi_in_stack->pos)--;
1910 } /* alloc_or_pop_from_Phi_in_stack */
1911 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1914 * Creates a Phi node with a given, fixed array **in of predecessors.
1915 * If the Phi node is unnecessary, as the same value reaches the block
1916 * through all control flow paths, it is eliminated and the value
1917 * returned directly. This constructor is only intended for use in
1918 * the automatic Phi node generation triggered by get_value or mature.
1919 * The implementation is quite tricky and depends on the fact, that
1920 * the nodes are allocated on a stack:
1921 * The in array contains predecessors and NULLs. The NULLs appear,
1922 * if get_r_value_internal, that computed the predecessors, reached
1923 * the same block on two paths. In this case the same value reaches
1924 * this block on both paths, there is no definition in between. We need
1925 not allocate a Phi where these paths merge, but we have to communicate
1926 * this fact to the caller. This happens by returning a pointer to the
1927 * node the caller _will_ allocate. (Yes, we predict the address. We can
1928 * do so because the nodes are allocated on the obstack.) The caller then
1929 * finds a pointer to itself and, when this routine is called again,
1930 * eliminates itself.
1932 static INLINE ir_node *
1933 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1936 ir_node *res, *known;
1938 /* Allocate a new node on the obstack. This can return a node to
1939 which some of the pointers in the in-array already point.
1940 Attention: the constructor copies the in array, i.e., the later
1941 changes to the array in this routine do not affect the
1942 constructed node! If the in array contains NULLs, there will be
1943 missing predecessors in the returned node. Is this a possible
1944 internal state of the Phi node generation? */
1945 #if USE_EXPLICIT_PHI_IN_STACK
1946 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1948 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1949 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1952 /* The in-array can contain NULLs. These were returned by
1953 get_r_value_internal if it reached the same block/definition on a
1954 second path. The NULLs are replaced by the node itself to
1955 simplify the test in the next loop. */
1956 for (i = 0; i < ins; ++i) {
1961 /* This loop checks whether the Phi has more than one predecessor.
1962 If so, it is a real Phi node and we break the loop. Else the Phi
1963 node merges the same definition on several paths and therefore is
1965 for (i = 0; i < ins; ++i) {
1966 if (in[i] == res || in[i] == known)
1975 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1977 #if USE_EXPLICIT_PHI_IN_STACK
1978 free_to_Phi_in_stack(res);
1980 edges_node_deleted(res, current_ir_graph);
1981 obstack_free(current_ir_graph->obst, res);
1985 res = optimize_node (res);
1986 IRN_VRFY_IRG(res, irg);
1989 /* return the pointer to the Phi node. This node might be deallocated! */
1991 } /* new_rd_Phi_in */
1994 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1997 * Allocates and returns this node. The routine called to allocate the
1998 * node might optimize it away and return a real value, or even a pointer
1999 * to a deallocated Phi node on top of the obstack!
2000 * This function is called with an in-array of proper size.
2003 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2005 ir_node *prevBlock, *res;
2008 /* This loop goes to all predecessor blocks of the block the Phi node is in
2009 and there finds the operands of the Phi node by calling
2010 get_r_value_internal. */
2011 for (i = 1; i <= ins; ++i) {
2012 assert (block->in[i]);
2013 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2015 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2018 /* After collecting all predecessors into the array nin a new Phi node
2019 with these predecessors is created. This constructor contains an
2020 optimization: If all predecessors of the Phi node are identical it
2021 returns the only operand instead of a new Phi node. If the value
2022 passes two different control flow edges without being defined, and
2023 this is the second path treated, a pointer to the node that will be
2024 allocated for the first path (recursion) is returned. We already
2025 know the address of this node, as it is the next node to be allocated
2026 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2027 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2029 /* Now we know the value for "pos" and can enter it in the array with
2030 all known local variables. Attention: this might be a pointer to
2031 a node, that later will be allocated!!! See new_rd_Phi_in().
2032 If this is called in mature, after some set_value() in the same block,
2033 the proper value must not be overwritten:
2035 get_value (makes Phi0, put's it into graph_arr)
2036 set_value (overwrites Phi0 in graph_arr)
2037 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2040 if (!block->attr.block.graph_arr[pos]) {
2041 block->attr.block.graph_arr[pos] = res;
2043 /* printf(" value already computed by %s\n",
2044 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2051 * This function returns the last definition of a variable. In case
2052 * this variable was last defined in a previous block, Phi nodes are
2053 * inserted. If the part of the firm graph containing the definition
2054 * is not yet constructed, a dummy Phi node is returned.
2057 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2060 /* There are 4 cases to treat.
2062 1. The block is not mature and we visit it the first time. We can not
2063 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2064 predecessors is returned. This node is added to the linked list (field
2065 "link") of the containing block to be completed when this block is
2066 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2069 2. The value is already known in this block, graph_arr[pos] is set and we
2070 visit the block the first time. We can return the value without
2071 creating any new nodes.
2073 3. The block is mature and we visit it the first time. A Phi node needs
2074 to be created (phi_merge). If the Phi is not needed, as all it's
2075 operands are the same value reaching the block through different
2076 paths, it's optimized away and the value itself is returned.
2078 4. The block is mature, and we visit it the second time. Now two
2079 subcases are possible:
2080 * The value was computed completely the last time we were here. This
2081 is the case if there is no loop. We can return the proper value.
2082 * The recursion that visited this node and set the flag did not
2083 return yet. We are computing a value in a loop and need to
2084 break the recursion without knowing the result yet.
2085 @@@ strange case. Straight forward we would create a Phi before
2086 starting the computation of it's predecessors. In this case we will
2087 find a Phi here in any case. The problem is that this implementation
2088 only creates a Phi after computing the predecessors, so that it is
2089 hard to compute self references of this Phi. @@@
2090 There is no simple check for the second subcase. Therefore we check
2091 for a second visit and treat all such cases as the second subcase.
2092 Anyways, the basic situation is the same: we reached a block
2093 on two paths without finding a definition of the value: No Phi
2094 nodes are needed on both paths.
2095 We return this information "Two paths, no Phi needed" by a very tricky
2096 implementation that relies on the fact that an obstack is a stack and
2097 will return a node with the same address on different allocations.
2098 Look also at phi_merge and new_rd_phi_in to understand this.
2099 @@@ Unfortunately this does not work, see testprogram
2100 three_cfpred_example.
2104 /* case 4 -- already visited. */
2105 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2107 /* visited the first time */
2108 set_irn_visited(block, get_irg_visited(current_ir_graph));
2110 /* Get the local valid value */
2111 res = block->attr.block.graph_arr[pos];
2113 /* case 2 -- If the value is actually computed, return it. */
2114 if (res) return res;
2116 if (block->attr.block.matured) { /* case 3 */
2118 /* The Phi has the same amount of ins as the corresponding block. */
2119 int ins = get_irn_arity(block);
2121 NEW_ARR_A (ir_node *, nin, ins);
2123 /* Phi merge collects the predecessors and then creates a node. */
2124 res = phi_merge (block, pos, mode, nin, ins);
2126 } else { /* case 1 */
2127 /* The block is not mature, we don't know how many in's are needed. A Phi
2128 with zero predecessors is created. Such a Phi node is called Phi0
2129 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2130 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2132 The Phi0 has to remember the pos of it's internal value. If the real
2133 Phi is computed, pos is used to update the array with the local
2136 res = new_rd_Phi0 (current_ir_graph, block, mode);
2137 res->attr.phi0_pos = pos;
2138 res->link = block->link;
2142 /* If we get here, the frontend missed a use-before-definition error */
2145 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2146 assert (mode->code >= irm_F && mode->code <= irm_P);
2147 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2148 tarval_mode_null[mode->code]);
2151 /* The local valid value is available now. */
2152 block->attr.block.graph_arr[pos] = res;
2155 } /* get_r_value_internal */
2160 it starts the recursion. This causes an Id at the entry of
2161 every block that has no definition of the value! **/
2163 #if USE_EXPLICIT_PHI_IN_STACK
2165 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2166 void free_Phi_in_stack(Phi_in_stack *s) { }
2169 static INLINE ir_node *
2170 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2171 ir_node **in, int ins, ir_node *phi0)
2174 ir_node *res, *known;
2176 /* Allocate a new node on the obstack. The allocation copies the in
2178 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2179 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2181 /* This loop checks whether the Phi has more than one predecessor.
2182 If so, it is a real Phi node and we break the loop. Else the
2183 Phi node merges the same definition on several paths and therefore
2184 is not needed. Don't consider Bad nodes! */
2186 for (i=0; i < ins; ++i)
2190 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2192 /* Optimize self referencing Phis: We can't detect them yet properly, as
2193 they still refer to the Phi0 they will replace. So replace right now. */
2194 if (phi0 && in[i] == phi0) in[i] = res;
2196 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2204 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2207 edges_node_deleted(res, current_ir_graph);
2208 obstack_free (current_ir_graph->obst, res);
2209 if (is_Phi(known)) {
2210 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2211 order, an enclosing Phi node may become superfluous. */
2212 res = optimize_in_place_2(known);
2214 exchange(known, res);
2220 /* A undefined value, e.g., in unreachable code. */
2224 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2225 IRN_VRFY_IRG(res, irg);
2226 /* Memory Phis in endless loops must be kept alive.
2227 As we can't distinguish these easily we keep all of them alive. */
2228 if ((res->op == op_Phi) && (mode == mode_M))
2229 add_End_keepalive(get_irg_end(irg), res);
2233 } /* new_rd_Phi_in */
2236 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2238 #if PRECISE_EXC_CONTEXT
2240 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2243 * Construct a new frag_array for node n.
2244 * Copy the content from the current graph_arr of the corresponding block:
2245 * this is the current state.
2246 * Set ProjM(n) as current memory state.
2247 * Further the last entry in frag_arr of current block points to n. This
2248 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2250 static INLINE ir_node ** new_frag_arr(ir_node *n)
2255 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2256 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2257 sizeof(ir_node *)*current_ir_graph->n_loc);
2259 /* turn off optimization before allocating Proj nodes, as res isn't
2261 opt = get_opt_optimize(); set_optimize(0);
2262 /* Here we rely on the fact that all frag ops have Memory as first result! */
2263 if (get_irn_op(n) == op_Call)
2264 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2265 else if (get_irn_op(n) == op_CopyB)
2266 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2268 assert((pn_Quot_M == pn_DivMod_M) &&
2269 (pn_Quot_M == pn_Div_M) &&
2270 (pn_Quot_M == pn_Mod_M) &&
2271 (pn_Quot_M == pn_Load_M) &&
2272 (pn_Quot_M == pn_Store_M) &&
2273 (pn_Quot_M == pn_Alloc_M) &&
2274 (pn_Quot_M == pn_Bound_M));
2275 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2279 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2281 } /* new_frag_arr */
2284 * Returns the frag_arr from a node.
2286 static INLINE ir_node **get_frag_arr(ir_node *n) {
2287 switch (get_irn_opcode(n)) {
2289 return n->attr.call.exc.frag_arr;
2291 return n->attr.alloc.exc.frag_arr;
2293 return n->attr.load.exc.frag_arr;
2295 return n->attr.store.exc.frag_arr;
2297 return n->attr.except.frag_arr;
2299 } /* get_frag_arr */
/** Records val as the value at pos in frag_arr and in every frag_arr
    chained behind it (slot n_loc-1 links each frag array to the next
    fragile op in the block). Existing entries are never overwritten,
    so the first definition wins. */
2302 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2304 if (!frag_arr[pos]) frag_arr[pos] = val;
2305 if (frag_arr[current_ir_graph->n_loc - 1]) {
/* Follow the chain to the next fragile op's frag array. */
2306 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2307 assert(arr != frag_arr && "Endless recursion detected");
2308 set_frag_value(arr, pos, val);
/* NOTE(review): the lines below look like an iterative variant of the
   same walk, bounded to 1000 steps to guard against cyclic chains;
   several lines appear to be missing from this excerpt — confirm
   against the full source. */
2313 for (i = 0; i < 1000; ++i) {
2314 if (!frag_arr[pos]) {
2315 frag_arr[pos] = val;
2317 if (frag_arr[current_ir_graph->n_loc - 1]) {
2318 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2324 assert(0 && "potential endless recursion");
2326 } /* set_frag_value */
2329 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2333 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2335 frag_arr = get_frag_arr(cfOp);
2336 res = frag_arr[pos];
2338 if (block->attr.block.graph_arr[pos]) {
2339 /* There was a set_value() after the cfOp and no get_value before that
2340 set_value(). We must build a Phi node now. */
2341 if (block->attr.block.matured) {
2342 int ins = get_irn_arity(block);
2344 NEW_ARR_A (ir_node *, nin, ins);
2345 res = phi_merge(block, pos, mode, nin, ins);
2347 res = new_rd_Phi0 (current_ir_graph, block, mode);
2348 res->attr.phi0_pos = pos;
2349 res->link = block->link;
2353 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2354 but this should be better: (remove comment if this works) */
2355 /* It's a Phi, we can write this into all graph_arrs with NULL */
2356 set_frag_value(block->attr.block.graph_arr, pos, res);
2358 res = get_r_value_internal(block, pos, mode);
2359 set_frag_value(block->attr.block.graph_arr, pos, res);
2363 } /* get_r_frag_value_internal */
2364 #endif /* PRECISE_EXC_CONTEXT */
2367 * Computes the predecessors for the real phi node, and then
2368 * allocates and returns this node. The routine called to allocate the
2369 * node might optimize it away and return a real value.
2370 * This function must be called with an in-array of proper size.
2373 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2375 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2378 /* If this block has no value at pos create a Phi0 and remember it
2379 in graph_arr to break recursions.
2380 Else we may not set graph_arr as there a later value is remembered. */
2382 if (!block->attr.block.graph_arr[pos]) {
2383 if (block == get_irg_start_block(current_ir_graph)) {
2384 /* Collapsing to Bad tarvals is no good idea.
2385 So we call a user-supplied routine here that deals with this case as
2386 appropriate for the given language. Sorrily the only help we can give
2387 here is the position.
2389 Even if all variables are defined before use, it can happen that
2390 we get to the start block, if a Cond has been replaced by a tuple
2391 (bad, jmp). In this case we call the function needlessly, eventually
2392 generating an non existent error.
2393 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2396 if (default_initialize_local_variable) {
2397 ir_node *rem = get_cur_block();
2399 set_cur_block(block);
2400 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2404 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2405 /* We don't need to care about exception ops in the start block.
2406 There are none by definition. */
2407 return block->attr.block.graph_arr[pos];
2409 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2410 block->attr.block.graph_arr[pos] = phi0;
2411 #if PRECISE_EXC_CONTEXT
2412 if (get_opt_precise_exc_context()) {
2413 /* Set graph_arr for fragile ops. Also here we should break recursion.
2414 We could choose a cyclic path through an cfop. But the recursion would
2415 break at some point. */
2416 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2422 /* This loop goes to all predecessor blocks of the block the Phi node
2423 is in and there finds the operands of the Phi node by calling
2424 get_r_value_internal. */
2425 for (i = 1; i <= ins; ++i) {
2426 prevCfOp = skip_Proj(block->in[i]);
2428 if (is_Bad(prevCfOp)) {
2429 /* In case a Cond has been optimized we would get right to the start block
2430 with an invalid definition. */
2431 nin[i-1] = new_Bad();
2434 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2436 if (!is_Bad(prevBlock)) {
2437 #if PRECISE_EXC_CONTEXT
2438 if (get_opt_precise_exc_context() &&
2439 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2440 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2441 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2444 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2446 nin[i-1] = new_Bad();
2450 /* We want to pass the Phi0 node to the constructor: this finds additional
2451 optimization possibilities.
2452 The Phi0 node either is allocated in this function, or it comes from
2453 a former call to get_r_value_internal. In this case we may not yet
2454 exchange phi0, as this is done in mature_immBlock. */
2456 phi0_all = block->attr.block.graph_arr[pos];
2457 if (!((get_irn_op(phi0_all) == op_Phi) &&
2458 (get_irn_arity(phi0_all) == 0) &&
2459 (get_nodes_block(phi0_all) == block)))
2465 /* After collecting all predecessors into the array nin a new Phi node
2466 with these predecessors is created. This constructor contains an
2467 optimization: If all predecessors of the Phi node are identical it
2468 returns the only operand instead of a new Phi node. */
2469 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2471 /* In case we allocated a Phi0 node at the beginning of this procedure,
2472 we need to exchange this Phi0 with the real Phi. */
2474 exchange(phi0, res);
2475 block->attr.block.graph_arr[pos] = res;
2476 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2477 only an optimization. */
2484 * This function returns the last definition of a variable. In case
2485 * this variable was last defined in a previous block, Phi nodes are
2486 * inserted. If the part of the firm graph containing the definition
2487 * is not yet constructed, a dummy Phi node is returned.
 *
 * Callers (e.g. get_d_value, get_store below) call inc_irg_visited()
 * before each lookup, so the visited flag distinguishes a first visit
 * (cases 1-3) from a revisit (case 4).
 *
 * NOTE(review): the return type, local declarations and some closing
 * braces of this function fall outside the visible lines; the control
 * flow annotated below follows the numbered cases in the comment.
2490 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2493 /* There are 4 cases to treat.
2495 1. The block is not mature and we visit it the first time. We can not
2496 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2497 predecessors is returned. This node is added to the linked list (field
2498 "link") of the containing block to be completed when this block is
2499 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2502 2. The value is already known in this block, graph_arr[pos] is set and we
2503 visit the block the first time. We can return the value without
2504 creating any new nodes.
2506 3. The block is mature and we visit it the first time. A Phi node needs
2507 to be created (phi_merge). If the Phi is not needed, as all it's
2508 operands are the same value reaching the block through different
2509 paths, it's optimized away and the value itself is returned.
2511 4. The block is mature, and we visit it the second time. Now two
2512 subcases are possible:
2513 * The value was computed completely the last time we were here. This
2514 is the case if there is no loop. We can return the proper value.
2515 * The recursion that visited this node and set the flag did not
2516 return yet. We are computing a value in a loop and need to
2517 break the recursion. This case only happens if we visited
2518 the same block with phi_merge before, which inserted a Phi0.
2519 So we return the Phi0.
2522 /* case 4 -- already visited. */
2523 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2524 /* As phi_merge allocates a Phi0 this value is always defined. Here
2525 is the critical difference of the two algorithms. */
2526 assert(block->attr.block.graph_arr[pos]);
2527 return block->attr.block.graph_arr[pos];
2530 /* visited the first time */
2531 set_irn_visited(block, get_irg_visited(current_ir_graph));
2533 /* Get the local valid value */
2534 res = block->attr.block.graph_arr[pos];
2536 /* case 2 -- If the value is actually computed, return it. */
2537 if (res) { return res; };
2539 if (block->attr.block.matured) { /* case 3 */
2541 /* The Phi has the same amount of ins as the corresponding block. */
2542 int ins = get_irn_arity(block);
2544 NEW_ARR_A (ir_node *, nin, ins);
2546 /* Phi merge collects the predecessors and then creates a node. */
2547 res = phi_merge (block, pos, mode, nin, ins);
2549 } else { /* case 1 */
2550 /* The block is not mature, we don't know how many in's are needed. A Phi
2551 with zero predecessors is created. Such a Phi node is called Phi0
2552 node. The Phi0 is then added to the list of Phi0 nodes in this block
2553 to be matured by mature_immBlock later.
2554 The Phi0 has to remember the pos of it's internal value. If the real
2555 Phi is computed, pos is used to update the array with the local
2557 res = new_rd_Phi0 (current_ir_graph, block, mode);
2558 res->attr.phi0_pos = pos;
2559 res->link = block->link;
 /* NOTE(review): reaching the fallback below presumably requires
    res == NULL after the cases above -- the guarding condition is not
    visible in this excerpt; confirm before relying on it. */
2563 /* If we get here, the frontend missed a use-before-definition error */
2566 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2567 assert (mode->code >= irm_F && mode->code <= irm_P);
2568 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2569 get_mode_null(mode));
2572 /* The local valid value is available now. */
2573 block->attr.block.graph_arr[pos] = res;
2576 } /* get_r_value_internal */
2578 #endif /* USE_FAST_PHI_CONSTRUCTION */
2580 /* ************************************************************************** */
2583 * Finalize a Block node, when all control flows are known.
2584 * Acceptable parameters are only Block nodes.
 /* Finalizes an immature Block: fixes its in-array length, allocates the
    backedge array, and matures every Phi0 node queued on block->link by
    exchanging it with a real Phi built by phi_merge().  Only accepts
    Block nodes; maturing an already matured block is a no-op. */
2587 mature_immBlock(ir_node *block)
2593 assert (get_irn_opcode(block) == iro_Block);
2594 /* @@@ should be commented in
2595 assert (!get_Block_matured(block) && "Block already matured"); */
2597 if (!get_Block_matured(block)) {
 /* ins excludes slot 0 of the in-array -- presumably the node's own
    block slot; only real control-flow predecessors count.  TODO confirm
    against the ir_node layout. */
2598 ins = ARR_LEN (block->in)-1;
2599 /* Fix block parameters */
2600 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2602 /* An array for building the Phi nodes. */
2603 NEW_ARR_A (ir_node *, nin, ins);
2605 /* Traverse a chain of Phi nodes attached to this block and mature
 /* Each queued Phi0 remembers its value slot in attr.phi0_pos (set by
    get_r_value_internal); a fresh visited counter is needed per slot. */
2607 for (n = block->link; n; n = next) {
2608 inc_irg_visited(current_ir_graph);
2610 exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2613 block->attr.block.matured = 1;
2615 /* Now, as the block is a finished firm node, we can optimize it.
2616 Since other nodes have been allocated since the block was created
2617 we can not free the node on the obstack. Therefore we have to call
2619 Unfortunately the optimization does not change a lot, as all allocated
2620 nodes refer to the unoptimized node.
2621 We can call _2, as global cse has no effect on blocks. */
2622 block = optimize_in_place_2(block);
2623 IRN_VRFY_IRG(block, current_ir_graph);
2625 } /* mature_immBlock */
 /* new_d_* constructors: build the node in the current block of
    current_ir_graph, attaching the given debug info.  Constants are
    built in the start block instead -- presumably so they dominate all
    uses; confirm against the firm construction docs. */
2628 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2629 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2633 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2634 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2638 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2639 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2640 } /* new_d_Const_long */
2643 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2644 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2645 } /* new_d_Const_type */
2649 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2650 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2654 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2655 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
 /* NOTE: mutates the Cond node -- marks it 'fragmentary' and records the
    default Proj number -- before creating the mode_X Proj for it. */
2659 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2661 assert(arg->op == op_Cond);
2662 arg->attr.cond.kind = fragmentary;
2663 arg->attr.cond.default_proj = max_proj;
2664 res = new_Proj(arg, mode_X, max_proj);
2666 } /* new_d_defaultProj */
 /* Last argument of new_bd_Conv selects strict (1) vs. non-strict (0). */
2669 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2670 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2674 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2675 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2676 } /* new_d_strictConv */
2679 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2680 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2684 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2685 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2694 * Allocate the frag array.
 *
 * Allocates only during graph construction (phase_building), when the
 * precise-exception-context option is enabled, the node still has the
 * expected op (it could have been optimized away to something else),
 * and no frag array is set yet (it could be a cse hit).
2696 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2697 if (get_opt_precise_exc_context()) {
2698 if ((current_ir_graph->phase_state == phase_building) &&
2699 (get_irn_op(res) == op) && /* Could be optimized away. */
2700 !*frag_store) /* Could be a cse where the arr is already set. */ {
2701 *frag_store = new_frag_arr(res);
2704 } /* allocate_frag_arr */
 /* Division-like constructors (Quot, DivMod, Div, Mod): the result is
    pinned to its block, and with PRECISE_EXC_CONTEXT a frag array is
    allocated -- presumably to track the memory state for precise
    exceptions; see allocate_frag_arr above. */
2707 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2709 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2);
2710 res->attr.except.pin_state = op_pin_state_pinned;
2711 #if PRECISE_EXC_CONTEXT
2712 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2719 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2721 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2);
2722 res->attr.except.pin_state = op_pin_state_pinned;
2723 #if PRECISE_EXC_CONTEXT
2724 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2728 } /* new_d_DivMod */
2731 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2734 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2);
2735 res->attr.except.pin_state = op_pin_state_pinned;
2736 #if PRECISE_EXC_CONTEXT
2737 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2744 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2746 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2);
2747 res->attr.except.pin_state = op_pin_state_pinned;
2748 #if PRECISE_EXC_CONTEXT
2749 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
 /* More new_d_* constructors in the current block; fragile memory ops
    (Call, Load, Store, Alloc) additionally allocate a frag array when
    PRECISE_EXC_CONTEXT is enabled. */
2768 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2769 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2773 new_d_Jmp(dbg_info *db) {
2774 return new_bd_Jmp(db, current_ir_graph->current_block);
2778 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2779 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2783 new_d_Cond(dbg_info *db, ir_node *c) {
2784 return new_bd_Cond(db, current_ir_graph->current_block, c);
2788 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2792 res = new_bd_Call(db, current_ir_graph->current_block,
2793 store, callee, arity, in, tp);
2794 #if PRECISE_EXC_CONTEXT
2795 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2802 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2803 return new_bd_Return(db, current_ir_graph->current_block,
2805 } /* new_d_Return */
2808 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2810 res = new_bd_Load(db, current_ir_graph->current_block,
2812 #if PRECISE_EXC_CONTEXT
2813 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2820 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2822 res = new_bd_Store(db, current_ir_graph->current_block,
2824 #if PRECISE_EXC_CONTEXT
2825 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2832 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2836 res = new_bd_Alloc(db, current_ir_graph->current_block,
2837 store, size, alloc_type, where);
2838 #if PRECISE_EXC_CONTEXT
2839 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2846 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2847 ir_node *size, ir_type *free_type, where_alloc where)
2849 return new_bd_Free(db, current_ir_graph->current_block,
2850 store, ptr, size, free_type, where);
 /* simpleSel: a Sel with no index operands (n_index = 0). */
2854 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2855 /* GL: objptr was called frame before. Frame was a bad choice for the name
2856 as the operand could as well be a pointer to a dynamic object. */
2858 return new_bd_Sel(db, current_ir_graph->current_block,
2859 store, objptr, 0, NULL, ent);
2860 } /* new_d_simpleSel */
2863 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel)
2865 return new_bd_Sel(db, current_ir_graph->current_block,
2866 store, objptr, n_index, index, sel);
 /* SymConsts live in the start block, like the other constants above. */
2870 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2872 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2874 } /* new_d_SymConst_type */
2877 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind)
2879 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2880 value, kind, firm_unknown_type);
2881 } /* new_d_SymConst */
 /* NOTE(review): unlike its siblings this forwards to new_rd_Sync rather
    than a new_bd_* constructor -- looks intentional, but verify. */
2884 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2885 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
 /* Out-of-line version of the new_d_Bad macro/inline; the parenthesized
    definition style below (see (new_d_NoMem)) prevents macro expansion. */
2891 return _new_d_Bad();
2895 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2896 return new_bd_Confirm(db, current_ir_graph->current_block,
2898 } /* new_d_Confirm */
2901 new_d_Unknown(ir_mode *m) {
2902 return new_bd_Unknown(m);
2903 } /* new_d_Unknown */
2906 new_d_CallBegin(dbg_info *db, ir_node *call) {
2907 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2908 } /* new_d_CallBegin */
2911 new_d_EndReg(dbg_info *db) {
2912 return new_bd_EndReg(db, current_ir_graph->current_block);
2913 } /* new_d_EndReg */
2916 new_d_EndExcept(dbg_info *db) {
2917 return new_bd_EndExcept(db, current_ir_graph->current_block);
2918 } /* new_d_EndExcept */
2921 new_d_Break(dbg_info *db) {
2922 return new_bd_Break(db, current_ir_graph->current_block);
2926 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2927 return new_bd_Filter (db, current_ir_graph->current_block,
2929 } /* new_d_Filter */
 /* Parentheses around the name suppress a same-named function macro. */
2932 (new_d_NoMem)(void) {
2933 return _new_d_NoMem();
2937 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2938 ir_node *ir_true, ir_mode *mode) {
2939 return new_bd_Mux(db, current_ir_graph->current_block,
2940 sel, ir_false, ir_true, mode);
2944 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2945 return new_bd_Psi(db, current_ir_graph->current_block,
2946 arity, conds, vals, mode);
 /* CopyB and Bound are fragile: allocate frag arrays like Load/Store. */
2949 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2950 ir_node *dst, ir_node *src, ir_type *data_type) {
2952 res = new_bd_CopyB(db, current_ir_graph->current_block,
2953 store, dst, src, data_type);
2954 #if PRECISE_EXC_CONTEXT
2955 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2961 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2962 return new_bd_InstOf(db, current_ir_graph->current_block,
2963 store, objptr, type);
2964 } /* new_d_InstOf */
2967 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2968 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2971 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2972 ir_node *idx, ir_node *lower, ir_node *upper) {
2974 res = new_bd_Bound(db, current_ir_graph->current_block,
2975 store, idx, lower, upper);
2976 #if PRECISE_EXC_CONTEXT
2977 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2983 new_d_Pin(dbg_info *db, ir_node *node) {
2984 return new_bd_Pin(db, current_ir_graph->current_block, node);
2987 /* ********************************************************************* */
2988 /* Comfortable interface with automatic Phi node construction. */
2989 /* (Uses also constructors of the ?? interface, except new_Block.)       */
2990 /* ********************************************************************* */
2992 /* Block construction */
2993 /* immature Block without predecessors */
 /* Creates a new immature Block: arity -1 requests a dynamic in-array so
    predecessors can be appended later via add_immBlock_pred().  The new
    block becomes the current block, and a zero-initialized graph_arr
    (one slot per local, current_ir_graph->n_loc) is allocated for the
    automatic Phi construction.  Only valid while phase_building. */
2994 ir_node *new_d_immBlock(dbg_info *db) {
2997 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2998 /* creates a new dynamic in-array as length of in is -1 */
2999 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3000 current_ir_graph->current_block = res;
3001 res->attr.block.matured = 0;
3002 res->attr.block.dead = 0;
3003 /* res->attr.block.exc = exc_normal; */
3004 /* res->attr.block.handler_entry = 0; */
3005 res->attr.block.irg = current_ir_graph;
3006 res->attr.block.backedge = NULL;
3007 res->attr.block.in_cg = NULL;
3008 res->attr.block.cg_backedge = NULL;
3009 set_Block_block_visited(res, 0);
3011 /* Create and initialize array for Phi-node construction. */
3012 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3013 current_ir_graph->n_loc);
3014 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3016 /* Immature block may not be optimized! */
3017 IRN_VRFY_IRG(res, current_ir_graph);
3020 } /* new_d_immBlock */
3023 new_immBlock(void) {
3024 return new_d_immBlock(NULL);
3025 } /* new_immBlock */
3027 /* add an edge to a jmp/control flow node */
 /* Appends jmp as a new control-flow predecessor of an immature block.
    Asserts (rather than silently ignoring) if the block was already
    matured, since its in-array is fixed at that point. */
3029 add_immBlock_pred(ir_node *block, ir_node *jmp)
3031 if (block->attr.block.matured) {
3032 assert(0 && "Error: Block already matured!\n");
 /* n is the index of the new predecessor: ARR_LEN - 1 skips the
    reserved slot 0 of the in-array. */
3035 int n = ARR_LEN(block->in) - 1;
3036 assert(jmp != NULL);
3037 ARR_APP1(ir_node *, block->in, jmp);
3039 hook_set_irn_n(block, n, jmp, NULL);
3041 } /* add_immBlock_pred */
3043 /* changing the current block */
3045 set_cur_block(ir_node *target) {
3046 current_ir_graph->current_block = target;
3047 } /* set_cur_block */
3049 /* ************************ */
3050 /* parameter administration */
 /* Slot 0 of a block's graph_arr holds the memory state (see get_store
    below); user-visible value numbers are therefore stored at pos + 1. */
3052 /* get a value from the parameter array from the current block by its index */
3054 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3055 ir_graph *irg = current_ir_graph;
3056 assert(get_irg_phase_state(irg) == phase_building);
 /* Fresh visited counter so get_r_value_internal sees a first visit. */
3057 inc_irg_visited(irg);
3059 return get_r_value_internal(irg->current_block, pos + 1, mode);
3062 /* get a value from the parameter array from the current block by its index */
3064 get_value(int pos, ir_mode *mode) {
3065 return get_d_value(NULL, pos, mode);
3068 /* set a value at position pos in the parameter array from the current block */
3070 set_value(int pos, ir_node *value) {
3071 ir_graph *irg = current_ir_graph;
3072 assert(get_irg_phase_state(irg) == phase_building);
3073 assert(pos+1 < irg->n_loc);
3074 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3077 /* Find the value number for a node in the current block.*/
 /* Linear scan from the top; slot 0 (memory) is deliberately skipped. */
3079 find_value(ir_node *value) {
3081 ir_node *bl = current_ir_graph->current_block;
3083 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3084 if (bl->attr.block.graph_arr[i] == value)
3089 /* get the current store */
3092 ir_graph *irg = current_ir_graph;
3094 assert(get_irg_phase_state(irg) == phase_building);
3095 /* GL: one could call get_value instead */
3096 inc_irg_visited(irg);
 /* Memory lives in slot 0 with mode_M. */
3097 return get_r_value_internal(irg->current_block, 0, mode_M);
3100 /* set the current store: handles automatic Sync construction for Load nodes */
 /* Writes the memory state into slot 0 of the current block's graph_arr.
    With the auto-create-sync option, consecutive non-volatile Loads are
    rewritten to hang off the same memory input joined by a Sync node,
    allowing them to be reordered/executed independently. */
3102 set_store(ir_node *store)
3104 ir_node *load, *pload, *pred, *in[2];
3106 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3107 /* Beware: due to dead code elimination, a store might become a Bad node even in
3108 the construction phase. */
3109 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3111 if (get_opt_auto_create_sync()) {
3112 /* handle non-volatile Load nodes by automatically creating Sync's */
3113 load = skip_Proj(store);
3114 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3115 pred = get_Load_mem(load);
3117 if (is_Sync(pred)) {
3118 /* a Load after a Sync: move it up */
3119 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3121 set_Load_mem(load, get_memop_mem(mem));
3122 add_Sync_pred(pred, store);
3126 pload = skip_Proj(pred);
3127 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3128 /* a Load after a Load: create a new Sync */
3129 set_Load_mem(load, get_Load_mem(pload));
 /* NOTE(review): in[] is presumably filled with the two memory Projs
    just before this call -- those lines are not visible here; confirm. */
3133 store = new_Sync(2, in);
3138 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
 /* Keeps ka from being removed as dead code by adding it to the End
    node's keep-alive list of the current graph. */
3142 keep_alive(ir_node *ka) {
3143 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3146 /* --- Useful access routines --- */
3147 /* Returns the current block of the current graph. To set the current
3148 block use set_cur_block. */
3149 ir_node *get_cur_block(void) {
3150 return get_irg_current_block(current_ir_graph);
3151 } /* get_cur_block */
3153 /* Returns the frame type of the current graph */
3154 ir_type *get_cur_frame_type(void) {
3155 return get_irg_frame_type(current_ir_graph);
3156 } /* get_cur_frame_type */
3159 /* ********************************************************************* */
3162 /* call once for each run of the library */
3164 init_cons(uninitialized_local_variable_func_t *func) {
3165 default_initialize_local_variable = func;
3169 irp_finalize_cons(void) {
3171 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3172 irg_finalize_cons(get_irp_irg(i));
3174 irp->phase_state = phase_high;
3175 } /* irp_finalize_cons */
/* ************************************************************************** */
/* Convenience constructors without debug info: each new_X() forwards to     */
/* new_d_X(NULL, ...), i.e. builds the node with empty debug information in  */
/* the current block (constants and SymConsts end up in the start block via  */
/* their new_d_* counterparts above).                                        */
/* ************************************************************************** */
3178 ir_node *new_Block(int arity, ir_node **in) {
3179 return new_d_Block(NULL, arity, in);
3181 ir_node *new_Start (void) {
3182 return new_d_Start(NULL);
3184 ir_node *new_End (void) {
3185 return new_d_End(NULL);
3187 ir_node *new_Jmp (void) {
3188 return new_d_Jmp(NULL);
3190 ir_node *new_IJmp (ir_node *tgt) {
3191 return new_d_IJmp(NULL, tgt);
3193 ir_node *new_Cond (ir_node *c) {
3194 return new_d_Cond(NULL, c);
3196 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3197 return new_d_Return(NULL, store, arity, in);
3199 ir_node *new_Const (ir_mode *mode, tarval *con) {
3200 return new_d_Const(NULL, mode, con);
3203 ir_node *new_Const_long(ir_mode *mode, long value)
3205 return new_d_Const_long(NULL, mode, value);
 /* Derives the mode from the given type instead of taking it explicitly. */
3208 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3209 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3212 ir_node *new_SymConst_type (symconst_symbol value, symconst_kind kind, ir_type *type) {
3213 return new_d_SymConst_type(NULL, value, kind, type);
3215 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3216 return new_d_SymConst(NULL, value, kind);
3218 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3219 return new_d_simpleSel(NULL, store, objptr, ent);
3221 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3223 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3225 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3227 return new_d_Call(NULL, store, callee, arity, in, tp);
3229 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3230 return new_d_Add(NULL, op1, op2, mode);
3232 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3233 return new_d_Sub(NULL, op1, op2, mode);
3235 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3236 return new_d_Minus(NULL, op, mode);
3238 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3239 return new_d_Mul(NULL, op1, op2, mode);
3241 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3242 return new_d_Quot(NULL, memop, op1, op2);
3244 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3245 return new_d_DivMod(NULL, memop, op1, op2);
3247 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3248 return new_d_Div(NULL, memop, op1, op2);
3250 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3251 return new_d_Mod(NULL, memop, op1, op2);
3253 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3254 return new_d_Abs(NULL, op, mode);
3256 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3257 return new_d_And(NULL, op1, op2, mode);
3259 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3260 return new_d_Or(NULL, op1, op2, mode);
3262 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3263 return new_d_Eor(NULL, op1, op2, mode);
3265 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3266 return new_d_Not(NULL, op, mode);
3268 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3269 return new_d_Shl(NULL, op, k, mode);
3271 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3272 return new_d_Shr(NULL, op, k, mode);
3274 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3275 return new_d_Shrs(NULL, op, k, mode);
3277 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3278 return new_d_Rot(NULL, op, k, mode);
3280 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3281 return new_d_Carry(NULL, op1, op2, mode);
3283 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3284 return new_d_Borrow(NULL, op1, op2, mode);
3286 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3287 return new_d_Cmp(NULL, op1, op2);
3289 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3290 return new_d_Conv(NULL, op, mode);
3292 ir_node *new_strictConv (ir_node *op, ir_mode *mode) {
3293 return new_d_strictConv(NULL, op, mode);
3295 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3296 return new_d_Cast(NULL, op, to_tp);
3298 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3299 return new_d_Phi(NULL, arity, in, mode);
3301 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3302 return new_d_Load(NULL, store, addr, mode);
3304 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3305 return new_d_Store(NULL, store, addr, val);
3307 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3308 where_alloc where) {
3309 return new_d_Alloc(NULL, store, size, alloc_type, where);
3311 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3312 ir_type *free_type, where_alloc where) {
3313 return new_d_Free(NULL, store, ptr, size, free_type, where);
3315 ir_node *new_Sync (int arity, ir_node *in[]) {
3316 return new_d_Sync(NULL, arity, in);
3318 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3319 return new_d_Proj(NULL, arg, mode, proj);
3321 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3322 return new_d_defaultProj(NULL, arg, max_proj);
3324 ir_node *new_Tuple (int arity, ir_node **in) {
3325 return new_d_Tuple(NULL, arity, in);
3327 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3328 return new_d_Id(NULL, val, mode);
3330 ir_node *new_Bad (void) {
3333 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3334 return new_d_Confirm (NULL, val, bound, cmp);
3336 ir_node *new_Unknown(ir_mode *m) {
3337 return new_d_Unknown(m);
3339 ir_node *new_CallBegin (ir_node *callee) {
3340 return new_d_CallBegin(NULL, callee);
3342 ir_node *new_EndReg (void) {
3343 return new_d_EndReg(NULL);
3345 ir_node *new_EndExcept (void) {
3346 return new_d_EndExcept(NULL);
3348 ir_node *new_Break (void) {
3349 return new_d_Break(NULL);
3351 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3352 return new_d_Filter(NULL, arg, mode, proj);
3354 ir_node *new_NoMem (void) {
3355 return new_d_NoMem();
3357 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3358 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3360 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3361 return new_d_Psi(NULL, arity, conds, vals, mode);
3363 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3364 return new_d_CopyB(NULL, store, dst, src, data_type);
3366 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3367 return new_d_InstOf (NULL, store, objptr, ent);
3369 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3370 return new_d_Raise(NULL, store, obj);
3372 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3373 return new_d_Bound(NULL, store, idx, lower, upper);
3375 ir_node *new_Pin(ir_node *node) {
3376 return new_d_Pin(NULL, node);