3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler, Michael Beck
10 * Copyright: (c) 1998-2006 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
19 #include "irgraph_t.h"
23 #include "firm_common_t.h"
30 #include "irbackedge_t.h"
32 #include "iredges_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
   Phi_in. Redefinition in irgraph.c!! */
/* NOTE(review): the struct Phi_in_stack body and the closing #endif are not
   visible in this extract -- confirm against the original file. */
typedef struct Phi_in_stack Phi_in_stack;
/* when we need verifying */
# define IRN_VRFY_IRG(res, irg)
/* NOTE(review): two definitions of IRN_VRFY_IRG appear back to back here;
   presumably an #ifdef NDEBUG / #else / #endif was lost in extraction --
   verify against version control.  The second form runs the node verifier. */
# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/**
 * Language dependent variable initialization callback.
 * Invoked to produce a value for a local variable that is read before
 * it was ever written; NULL until a front end installs one.
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Template: emits a "bd" (build-in-current-graph) constructor for a binary op. */
#define NEW_BD_BINOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
               ir_node *op1, ir_node *op2, ir_mode *mode)         \
{                                                                 \
  ir_node  *in[2];                                                \
  ir_node  *res;                                                  \
  ir_graph *irg = current_ir_graph;                               \
  in[0] = op1;                                                    \
  in[1] = op2;                                                    \
  res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);     \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
  return res;                                                     \
}
/* Template: emits a "bd" constructor for a unary op. */
#define NEW_BD_UNOP(instr)                                        \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
               ir_node *op, ir_mode *mode)                        \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *irg = current_ir_graph;                               \
  res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);    \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
  return res;                                                     \
}
/* Template: emits a "bd" constructor for a division-like op
   (memory operand + two data operands, result mode is always mode_T). */
#define NEW_BD_DIVOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
               ir_node *memop, ir_node *op1, ir_node *op2)        \
{                                                                 \
  ir_node  *in[3];                                                \
  ir_node  *res;                                                  \
  ir_graph *irg = current_ir_graph;                               \
  in[0] = memop;                                                  \
  in[1] = op1;                                                    \
  in[2] = op2;                                                    \
  res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in);   \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
  return res;                                                     \
}
/* Template: emits an "rd" constructor for a binary op.  It temporarily
   switches current_ir_graph to irg and delegates to the bd constructor. */
#define NEW_RD_BINOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
               ir_node *op1, ir_node *op2, ir_mode *mode)         \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, op1, op2, mode);                \
  current_ir_graph = rem;                                         \
  return res;                                                     \
}
/* Template: emits an "rd" constructor for a unary op (see NEW_RD_BINOP). */
#define NEW_RD_UNOP(instr)                                        \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
               ir_node *op, ir_mode *mode)                        \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, op, mode);                      \
  current_ir_graph = rem;                                         \
  return res;                                                     \
}
/* Template: emits an "rd" constructor for a division-like op (see NEW_RD_BINOP). */
#define NEW_RD_DIVOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
               ir_node *memop, ir_node *op1, ir_node *op2)        \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, memop, op1, op2);               \
  current_ir_graph = rem;                                         \
  return res;                                                     \
}
/* Template: emits a "d" constructor for a binary op; builds in the
   current block of current_ir_graph. */
#define NEW_D_BINOP(instr)                                                     \
ir_node *                                                                      \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {       \
  return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode);  \
}
/* Template: emits a "d" constructor for a unary op; builds in the
   current block of current_ir_graph. */
#define NEW_D_UNOP(instr)                                                      \
ir_node *                                                                      \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {                      \
  return new_bd_##instr(db, current_ir_graph->current_block, op, mode);        \
}
165 * Constructs a Block with a fixed number of predecessors.
166 * Does not set current_block. Can not be used with automatic
167 * Phi node construction.
170 new_bd_Block(dbg_info *db, int arity, ir_node **in)
173 ir_graph *irg = current_ir_graph;
175 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
176 set_Block_matured(res, 1);
177 set_Block_block_visited(res, 0);
179 /* res->attr.block.exc = exc_normal; */
180 /* res->attr.block.handler_entry = 0; */
181 res->attr.block.dead = 0;
182 res->attr.block.irg = irg;
183 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
184 res->attr.block.in_cg = NULL;
185 res->attr.block.cg_backedge = NULL;
186 res->attr.block.extblk = NULL;
188 IRN_VRFY_IRG(res, irg);
193 new_bd_Start(dbg_info *db, ir_node *block)
196 ir_graph *irg = current_ir_graph;
198 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
199 /* res->attr.start.irg = irg; */
201 IRN_VRFY_IRG(res, irg);
206 new_bd_End(dbg_info *db, ir_node *block)
209 ir_graph *irg = current_ir_graph;
211 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
213 IRN_VRFY_IRG(res, irg);
218 * Creates a Phi node with all predecessors. Calling this constructor
219 * is only allowed if the corresponding block is mature.
222 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
225 ir_graph *irg = current_ir_graph;
229 /* Don't assert that block matured: the use of this constructor is strongly
231 if ( get_Block_matured(block) )
232 assert( get_irn_arity(block) == arity );
234 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
236 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
238 for (i = arity-1; i >= 0; i--)
239 if (get_irn_op(in[i]) == op_Unknown) {
244 if (!has_unknown) res = optimize_node (res);
245 IRN_VRFY_IRG(res, irg);
247 /* Memory Phis in endless loops must be kept alive.
248 As we can't distinguish these easily we keep all of them alive. */
249 if ((res->op == op_Phi) && (mode == mode_M))
250 add_End_keepalive(get_irg_end(irg), res);
255 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
258 ir_graph *irg = current_ir_graph;
260 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
261 res->attr.con.tv = con;
262 set_Const_type(res, tp); /* Call method because of complex assertion. */
263 res = optimize_node (res);
264 assert(get_Const_type(res) == tp);
265 IRN_VRFY_IRG(res, irg);
268 } /* new_bd_Const_type */
271 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
273 ir_graph *irg = current_ir_graph;
275 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
279 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value)
281 ir_graph *irg = current_ir_graph;
283 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
284 } /* new_bd_Const_long */
287 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
290 ir_graph *irg = current_ir_graph;
292 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
293 res = optimize_node(res);
294 IRN_VRFY_IRG(res, irg);
299 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
303 ir_graph *irg = current_ir_graph;
305 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
306 res->attr.proj = proj;
309 assert(get_Proj_pred(res));
310 assert(get_nodes_block(get_Proj_pred(res)));
312 res = optimize_node(res);
314 IRN_VRFY_IRG(res, irg);
320 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
324 ir_graph *irg = current_ir_graph;
326 assert(arg->op == op_Cond);
327 arg->attr.cond.kind = fragmentary;
328 arg->attr.cond.default_proj = max_proj;
329 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
331 } /* new_bd_defaultProj */
334 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
337 ir_graph *irg = current_ir_graph;
339 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
340 res->attr.conv.strict = strict_flag;
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
347 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
350 ir_graph *irg = current_ir_graph;
352 assert(is_atomic_type(to_tp));
354 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
355 res->attr.cast.totype = to_tp;
356 res = optimize_node(res);
357 IRN_VRFY_IRG(res, irg);
362 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in)
365 ir_graph *irg = current_ir_graph;
367 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
368 res = optimize_node (res);
369 IRN_VRFY_IRG(res, irg);
394 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
398 ir_graph *irg = current_ir_graph;
401 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
402 res = optimize_node(res);
403 IRN_VRFY_IRG(res, irg);
408 new_bd_Jmp(dbg_info *db, ir_node *block)
411 ir_graph *irg = current_ir_graph;
413 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
414 res = optimize_node (res);
415 IRN_VRFY_IRG (res, irg);
420 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt)
423 ir_graph *irg = current_ir_graph;
425 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
426 res = optimize_node (res);
427 IRN_VRFY_IRG (res, irg);
429 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
435 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c)
438 ir_graph *irg = current_ir_graph;
440 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
441 res->attr.cond.kind = dense;
442 res->attr.cond.default_proj = 0;
443 res->attr.cond.pred = COND_JMP_PRED_NONE;
444 res = optimize_node (res);
445 IRN_VRFY_IRG(res, irg);
450 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
451 ir_node *callee, int arity, ir_node **in, ir_type *tp)
456 ir_graph *irg = current_ir_graph;
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in)
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode)
500 ir_graph *irg = current_ir_graph;
504 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
505 res->attr.load.exc.pin_state = op_pin_state_pinned;
506 res->attr.load.load_mode = mode;
507 res->attr.load.volatility = volatility_non_volatile;
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
514 new_bd_Store(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_node *val)
519 ir_graph *irg = current_ir_graph;
524 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
525 res->attr.store.exc.pin_state = op_pin_state_pinned;
526 res->attr.store.volatility = volatility_non_volatile;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, where_alloc where)
538 ir_graph *irg = current_ir_graph;
542 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
543 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
544 res->attr.alloc.where = where;
545 res->attr.alloc.type = alloc_type;
546 res = optimize_node(res);
547 IRN_VRFY_IRG(res, irg);
552 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
553 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
557 ir_graph *irg = current_ir_graph;
562 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
563 res->attr.free.where = where;
564 res->attr.free.type = free_type;
565 res = optimize_node(res);
566 IRN_VRFY_IRG(res, irg);
571 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
572 int arity, ir_node **in, ir_entity *ent)
577 ir_graph *irg = current_ir_graph;
579 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
582 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
585 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
587 * FIXM: Sel's can select functions which should be of mode mode_P_code.
589 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
590 res->attr.sel.ent = ent;
591 res = optimize_node(res);
592 IRN_VRFY_IRG(res, irg);
597 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
598 symconst_kind symkind, ir_type *tp) {
601 ir_graph *irg = current_ir_graph;
603 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
604 mode = mode_P_data; /* FIXME: can be mode_P_code */
608 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
610 res->attr.symc.num = symkind;
611 res->attr.symc.sym = value;
612 res->attr.symc.tp = tp;
614 res = optimize_node(res);
615 IRN_VRFY_IRG(res, irg);
617 } /* new_bd_SymConst_type */
620 new_bd_Sync(dbg_info *db, ir_node *block)
623 ir_graph *irg = current_ir_graph;
625 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
626 /* no need to call optimize node here, Sync are always created with no predecessors */
627 IRN_VRFY_IRG(res, irg);
632 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
634 ir_node *in[2], *res;
635 ir_graph *irg = current_ir_graph;
639 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
640 res->attr.confirm_cmp = cmp;
641 res = optimize_node (res);
642 IRN_VRFY_IRG(res, irg);
646 /* this function is often called with current_ir_graph unset */
648 new_bd_Unknown(ir_mode *m)
651 ir_graph *irg = current_ir_graph;
653 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
654 res = optimize_node(res);
656 } /* new_bd_Unknown */
659 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call)
663 ir_graph *irg = current_ir_graph;
665 in[0] = get_Call_ptr(call);
666 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
667 /* res->attr.callbegin.irg = irg; */
668 res->attr.callbegin.call = call;
669 res = optimize_node(res);
670 IRN_VRFY_IRG(res, irg);
672 } /* new_bd_CallBegin */
675 new_bd_EndReg(dbg_info *db, ir_node *block)
678 ir_graph *irg = current_ir_graph;
680 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
681 set_irg_end_reg(irg, res);
682 IRN_VRFY_IRG(res, irg);
684 } /* new_bd_EndReg */
687 new_bd_EndExcept(dbg_info *db, ir_node *block)
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
693 set_irg_end_except(irg, res);
694 IRN_VRFY_IRG (res, irg);
696 } /* new_bd_EndExcept */
699 new_bd_Break(dbg_info *db, ir_node *block)
702 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
705 res = optimize_node(res);
706 IRN_VRFY_IRG(res, irg);
711 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
715 ir_graph *irg = current_ir_graph;
717 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
718 res->attr.filter.proj = proj;
719 res->attr.filter.in_cg = NULL;
720 res->attr.filter.backedge = NULL;
723 assert(get_Proj_pred(res));
724 assert(get_nodes_block(get_Proj_pred(res)));
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
729 } /* new_bd_Filter */
732 new_bd_Mux(dbg_info *db, ir_node *block,
733 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
737 ir_graph *irg = current_ir_graph;
743 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
746 res = optimize_node(res);
747 IRN_VRFY_IRG(res, irg);
752 new_bd_Psi(dbg_info *db, ir_node *block,
753 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
757 ir_graph *irg = current_ir_graph;
760 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
762 for (i = 0; i < arity; ++i) {
764 in[2 * i + 1] = vals[i];
768 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
771 res = optimize_node(res);
772 IRN_VRFY_IRG(res, irg);
777 new_bd_CopyB(dbg_info *db, ir_node *block,
778 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
782 ir_graph *irg = current_ir_graph;
788 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
790 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
791 res->attr.copyb.data_type = data_type;
792 res = optimize_node(res);
793 IRN_VRFY_IRG(res, irg);
798 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
799 ir_node *objptr, ir_type *type)
803 ir_graph *irg = current_ir_graph;
807 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
808 res->attr.instof.type = type;
809 res = optimize_node(res);
810 IRN_VRFY_IRG(res, irg);
812 } /* new_bd_InstOf */
815 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
819 ir_graph *irg = current_ir_graph;
823 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
830 new_bd_Bound(dbg_info *db, ir_node *block,
831 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
835 ir_graph *irg = current_ir_graph;
841 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
842 res->attr.bound.exc.pin_state = op_pin_state_pinned;
843 res = optimize_node(res);
844 IRN_VRFY_IRG(res, irg);
849 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node)
852 ir_graph *irg = current_ir_graph;
854 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
855 res = optimize_node(res);
856 IRN_VRFY_IRG(res, irg);
860 /* --------------------------------------------- */
861 /* private interfaces, for professional use only */
862 /* --------------------------------------------- */
864 /* Constructs a Block with a fixed number of predecessors.
865 Does not set current_block. Can not be used with automatic
866 Phi node construction. */
868 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in)
870 ir_graph *rem = current_ir_graph;
873 current_ir_graph = irg;
874 res = new_bd_Block(db, arity, in);
875 current_ir_graph = rem;
881 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
883 ir_graph *rem = current_ir_graph;
886 current_ir_graph = irg;
887 res = new_bd_Start(db, block);
888 current_ir_graph = rem;
894 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
897 ir_graph *rem = current_ir_graph;
899 current_ir_graph = rem;
900 res = new_bd_End(db, block);
901 current_ir_graph = rem;
906 /* Creates a Phi node with all predecessors. Calling this constructor
907 is only allowed if the corresponding block is mature. */
909 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi(db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
925 ir_graph *rem = current_ir_graph;
927 current_ir_graph = irg;
928 res = new_bd_Const_type(db, block, mode, con, tp);
929 current_ir_graph = rem;
932 } /* new_rd_Const_type */
935 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
938 ir_graph *rem = current_ir_graph;
940 current_ir_graph = irg;
941 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
942 current_ir_graph = rem;
948 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
950 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
951 } /* new_rd_Const_long */
954 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
957 ir_graph *rem = current_ir_graph;
959 current_ir_graph = irg;
960 res = new_bd_Id(db, block, val, mode);
961 current_ir_graph = rem;
967 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
971 ir_graph *rem = current_ir_graph;
973 current_ir_graph = irg;
974 res = new_bd_Proj(db, block, arg, mode, proj);
975 current_ir_graph = rem;
981 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
985 ir_graph *rem = current_ir_graph;
987 current_ir_graph = irg;
988 res = new_bd_defaultProj(db, block, arg, max_proj);
989 current_ir_graph = rem;
992 } /* new_rd_defaultProj */
995 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
998 ir_graph *rem = current_ir_graph;
1000 current_ir_graph = irg;
1001 res = new_bd_Conv(db, block, op, mode, 0);
1002 current_ir_graph = rem;
1008 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1011 ir_graph *rem = current_ir_graph;
1013 current_ir_graph = irg;
1014 res = new_bd_Cast(db, block, op, to_tp);
1015 current_ir_graph = rem;
1021 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1024 ir_graph *rem = current_ir_graph;
1026 current_ir_graph = irg;
1027 res = new_bd_Tuple(db, block, arity, in);
1028 current_ir_graph = rem;
1031 } /* new_rd_Tuple */
/* Instantiate the remaining rd constructors from the templates above. */
NEW_RD_DIVOP(DivMod)
NEW_RD_BINOP(Borrow)
1054 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1055 ir_node *op1, ir_node *op2)
1058 ir_graph *rem = current_ir_graph;
1060 current_ir_graph = irg;
1061 res = new_bd_Cmp(db, block, op1, op2);
1062 current_ir_graph = rem;
1068 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block)
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_Jmp(db, block);
1075 current_ir_graph = rem;
1081 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1084 ir_graph *rem = current_ir_graph;
1086 current_ir_graph = irg;
1087 res = new_bd_IJmp(db, block, tgt);
1088 current_ir_graph = rem;
1094 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Cond(db, block, c);
1101 current_ir_graph = rem;
1107 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1108 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1115 current_ir_graph = rem;
1121 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, int arity, ir_node **in)
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Return(db, block, store, arity, in);
1129 current_ir_graph = rem;
1132 } /* new_rd_Return */
1135 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1136 ir_node *store, ir_node *adr, ir_mode *mode)
1139 ir_graph *rem = current_ir_graph;
1141 current_ir_graph = irg;
1142 res = new_bd_Load(db, block, store, adr, mode);
1143 current_ir_graph = rem;
1149 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1150 ir_node *store, ir_node *adr, ir_node *val)
1153 ir_graph *rem = current_ir_graph;
1155 current_ir_graph = irg;
1156 res = new_bd_Store(db, block, store, adr, val);
1157 current_ir_graph = rem;
1160 } /* new_rd_Store */
1163 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1164 ir_node *size, ir_type *alloc_type, where_alloc where)
1167 ir_graph *rem = current_ir_graph;
1169 current_ir_graph = irg;
1170 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1171 current_ir_graph = rem;
1174 } /* new_rd_Alloc */
1177 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1178 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1181 ir_graph *rem = current_ir_graph;
1183 current_ir_graph = irg;
1184 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1185 current_ir_graph = rem;
1191 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1192 ir_node *store, ir_node *objptr, ir_entity *ent)
1195 ir_graph *rem = current_ir_graph;
1197 current_ir_graph = irg;
1198 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1199 current_ir_graph = rem;
1202 } /* new_rd_simpleSel */
1205 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1206 int arity, ir_node **in, ir_entity *ent)
1209 ir_graph *rem = current_ir_graph;
1211 current_ir_graph = irg;
1212 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1213 current_ir_graph = rem;
1219 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1220 symconst_kind symkind, ir_type *tp)
1223 ir_graph *rem = current_ir_graph;
1225 current_ir_graph = irg;
1226 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1227 current_ir_graph = rem;
1230 } /* new_rd_SymConst_type */
1233 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1234 symconst_kind symkind)
1236 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1238 } /* new_rd_SymConst */
1240 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp)
1242 symconst_symbol sym;
1243 sym.entity_p = symbol;
1244 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1245 } /* new_rd_SymConst_addr_ent */
1247 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp)
1249 symconst_symbol sym;
1250 sym.entity_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1252 } /* new_rd_SymConst_ofs_ent */
1254 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1255 symconst_symbol sym;
1256 sym.ident_p = symbol;
1257 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1258 } /* new_rd_SymConst_addr_name */
1260 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1261 symconst_symbol sym;
1262 sym.type_p = symbol;
1263 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1264 } /* new_rd_SymConst_type_tag */
1266 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1267 symconst_symbol sym;
1268 sym.type_p = symbol;
1269 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1270 } /* new_rd_SymConst_size */
1272 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1273 symconst_symbol sym;
1274 sym.type_p = symbol;
1275 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1276 } /* new_rd_SymConst_align */
1279 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1282 ir_graph *rem = current_ir_graph;
1285 current_ir_graph = irg;
1286 res = new_bd_Sync(db, block);
1287 current_ir_graph = rem;
1289 for (i = 0; i < arity; ++i)
1290 add_Sync_pred(res, in[i]);
1296 new_rd_Bad(ir_graph *irg) {
1297 return get_irg_bad(irg);
1301 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1304 ir_graph *rem = current_ir_graph;
1306 current_ir_graph = irg;
1307 res = new_bd_Confirm(db, block, val, bound, cmp);
1308 current_ir_graph = rem;
1311 } /* new_rd_Confirm */
1313 /* this function is often called with current_ir_graph unset */
1315 new_rd_Unknown(ir_graph *irg, ir_mode *m)
1318 ir_graph *rem = current_ir_graph;
1320 current_ir_graph = irg;
1321 res = new_bd_Unknown(m);
1322 current_ir_graph = rem;
1325 } /* new_rd_Unknown */
1328 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1331 ir_graph *rem = current_ir_graph;
1333 current_ir_graph = irg;
1334 res = new_bd_CallBegin(db, block, call);
1335 current_ir_graph = rem;
1338 } /* new_rd_CallBegin */
1341 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
1345 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1346 set_irg_end_reg(irg, res);
1347 IRN_VRFY_IRG(res, irg);
1349 } /* new_rd_EndReg */
1352 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
1356 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1357 set_irg_end_except(irg, res);
1358 IRN_VRFY_IRG (res, irg);
1360 } /* new_rd_EndExcept */
1363 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block)
1366 ir_graph *rem = current_ir_graph;
1368 current_ir_graph = irg;
1369 res = new_bd_Break(db, block);
1370 current_ir_graph = rem;
1373 } /* new_rd_Break */
1376 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1380 ir_graph *rem = current_ir_graph;
1382 current_ir_graph = irg;
1383 res = new_bd_Filter(db, block, arg, mode, proj);
1384 current_ir_graph = rem;
1387 } /* new_rd_Filter */
1390 new_rd_NoMem(ir_graph *irg) {
1391 return get_irg_no_mem(irg);
1392 } /* new_rd_NoMem */
1395 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1396 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1399 ir_graph *rem = current_ir_graph;
1401 current_ir_graph = irg;
1402 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1403 current_ir_graph = rem;
1409 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1410 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1413 ir_graph *rem = current_ir_graph;
1415 current_ir_graph = irg;
1416 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1417 current_ir_graph = rem;
1422 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1423 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1426 ir_graph *rem = current_ir_graph;
1428 current_ir_graph = irg;
1429 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1430 current_ir_graph = rem;
1433 } /* new_rd_CopyB */
1436 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1437 ir_node *objptr, ir_type *type)
1440 ir_graph *rem = current_ir_graph;
1442 current_ir_graph = irg;
1443 res = new_bd_InstOf(db, block, store, objptr, type);
1444 current_ir_graph = rem;
1447 } /* new_rd_InstOf */
1450 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1453 ir_graph *rem = current_ir_graph;
1455 current_ir_graph = irg;
1456 res = new_bd_Raise(db, block, store, obj);
1457 current_ir_graph = rem;
1460 } /* new_rd_Raise */
1462 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1463 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1466 ir_graph *rem = current_ir_graph;
1468 current_ir_graph = irg;
1469 res = new_bd_Bound(db, block, store, idx, lower, upper);
1470 current_ir_graph = rem;
1473 } /* new_rd_Bound */
1475 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node)
1478 ir_graph *rem = current_ir_graph;
1480 current_ir_graph = irg;
1481 res = new_bd_Pin(db, block, node);
1482 current_ir_graph = rem;
/* --------------------------------------------------------------------- */
/* "r" constructors: identical to the "rd" variants but without debug    */
/* info -- each simply forwards to its new_rd_* counterpart with db set  */
/* to NULL.  NOTE(review): the closing brace of every wrapper was        */
/* dropped by the lossy extraction; the code tokens below are otherwise  */
/* unmodified.                                                           */
/* --------------------------------------------------------------------- */
1487 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1488 return new_rd_Block(NULL, irg, arity, in);
1490 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1491 return new_rd_Start(NULL, irg, block);
1493 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1494 return new_rd_End(NULL, irg, block);
1496 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1497 return new_rd_Jmp(NULL, irg, block);
1499 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1500 return new_rd_IJmp(NULL, irg, block, tgt);
1502 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1503 return new_rd_Cond(NULL, irg, block, c);
1505 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1506 ir_node *store, int arity, ir_node **in) {
1507 return new_rd_Return(NULL, irg, block, store, arity, in);
1509 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1510 ir_mode *mode, tarval *con) {
1511 return new_rd_Const(NULL, irg, block, mode, con);
1513 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1514 ir_mode *mode, long value) {
1515 return new_rd_Const_long(NULL, irg, block, mode, value);
1517 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1518 ir_mode *mode, tarval *con, ir_type *tp) {
1519 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1521 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1522 symconst_symbol value, symconst_kind symkind) {
1523 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* simpleSel: a Sel with no index operands (0, NULL) -- for scalar entities. */
1525 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1526 ir_node *objptr, ir_entity *ent) {
1527 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1529 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1530 ir_node *objptr, int n_index, ir_node **index,
1532 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1534 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1535 ir_node *callee, int arity, ir_node **in,
1537 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic, logic, shift and compare wrappers (db == NULL).           */
/* The div-like ops (Quot/DivMod/Div/Mod) additionally thread a memory   */
/* operand `memop` through the operation.                                */
1539 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1540 ir_node *op1, ir_node *op2, ir_mode *mode) {
1541 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1543 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1544 ir_node *op1, ir_node *op2, ir_mode *mode) {
1545 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1547 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1548 ir_node *op, ir_mode *mode) {
1549 return new_rd_Minus(NULL, irg, block, op, mode);
1551 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1552 ir_node *op1, ir_node *op2, ir_mode *mode) {
1553 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1555 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1556 ir_node *memop, ir_node *op1, ir_node *op2) {
1557 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1559 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1560 ir_node *memop, ir_node *op1, ir_node *op2) {
1561 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1563 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1564 ir_node *memop, ir_node *op1, ir_node *op2) {
1565 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1567 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1568 ir_node *memop, ir_node *op1, ir_node *op2) {
1569 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1571 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_mode *mode) {
1573 return new_rd_Abs(NULL, irg, block, op, mode);
1575 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1576 ir_node *op1, ir_node *op2, ir_mode *mode) {
1577 return new_rd_And(NULL, irg, block, op1, op2, mode);
1579 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1580 ir_node *op1, ir_node *op2, ir_mode *mode) {
1581 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1583 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2, ir_mode *mode) {
1585 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1587 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_mode *mode) {
1589 return new_rd_Not(NULL, irg, block, op, mode);
1591 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1592 ir_node *op, ir_node *k, ir_mode *mode) {
1593 return new_rd_Shl(NULL, irg, block, op, k, mode);
1595 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_node *k, ir_mode *mode) {
1597 return new_rd_Shr(NULL, irg, block, op, k, mode);
1599 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1600 ir_node *op, ir_node *k, ir_mode *mode) {
1601 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1603 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1604 ir_node *op, ir_node *k, ir_mode *mode) {
1605 return new_rd_Rot(NULL, irg, block, op, k, mode);
1607 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1608 ir_node *op, ir_node *k, ir_mode *mode) {
1609 return new_rd_Carry(NULL, irg, block, op, k, mode);
1611 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1612 ir_node *op, ir_node *k, ir_mode *mode) {
1613 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1615 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1616 ir_node *op1, ir_node *op2) {
1617 return new_rd_Cmp(NULL, irg, block, op1, op2);
1619 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1620 ir_node *op, ir_mode *mode) {
1621 return new_rd_Conv(NULL, irg, block, op, mode);
/* Memory, control, and miscellaneous wrappers (db == NULL).             */
1623 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1624 return new_rd_Cast(NULL, irg, block, op, to_tp);
1626 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1627 ir_node **in, ir_mode *mode) {
1628 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1630 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1631 ir_node *store, ir_node *adr, ir_mode *mode) {
1632 return new_rd_Load(NULL, irg, block, store, adr, mode);
1634 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1635 ir_node *store, ir_node *adr, ir_node *val) {
1636 return new_rd_Store(NULL, irg, block, store, adr, val);
1638 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1639 ir_node *size, ir_type *alloc_type, where_alloc where) {
1640 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1642 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1643 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1644 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1646 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1647 return new_rd_Sync(NULL, irg, block, arity, in);
1649 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1650 ir_mode *mode, long proj) {
1651 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1653 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1655 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1657 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1658 int arity, ir_node **in) {
1659 return new_rd_Tuple(NULL, irg, block, arity, in );
1661 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1662 ir_node *val, ir_mode *mode) {
1663 return new_rd_Id(NULL, irg, block, val, mode);
1665 ir_node *new_r_Bad (ir_graph *irg) {
1666 return new_rd_Bad(irg);
1668 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1669 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1671 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1672 return new_rd_Unknown(irg, m);
1674 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1675 return new_rd_CallBegin(NULL, irg, block, callee);
1677 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1678 return new_rd_EndReg(NULL, irg, block);
1680 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1681 return new_rd_EndExcept(NULL, irg, block);
1683 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1684 return new_rd_Break(NULL, irg, block);
1686 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1687 ir_mode *mode, long proj) {
1688 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1690 ir_node *new_r_NoMem (ir_graph *irg) {
1691 return new_rd_NoMem(irg);
1693 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1694 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1695 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1697 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1698 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1699 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1701 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1702 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1703 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1705 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1707 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1709 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1710 ir_node *store, ir_node *obj) {
1711 return new_rd_Raise(NULL, irg, block, store, obj);
1713 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1714 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1715 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1717 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1718 return new_rd_Pin(NULL, irg, block, node);
1721 /** ********************/
1722 /** public interfaces */
1723 /** construction tools */
1727 * - create a new Start node in the current block
1729 * @return s - pointer to the created Start node
/** Constructs a Start node in the current block of current_ir_graph.
 *  The node has mode_T (a tuple of results) and zero predecessors.
 *  NOTE(review): lossy listing -- the return type line, the declaration of
 *  `res` and the final `return res;` were dropped by extraction. */
1734 new_d_Start(dbg_info *db)
1738 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1739 op_Start, mode_T, 0, NULL);
1740 /* res->attr.start.irg = current_ir_graph; */
1742 res = optimize_node(res);
1743 IRN_VRFY_IRG(res, current_ir_graph);
/** Constructs an End node (mode_X) in the current block.
 *  The arity of -1 presumably marks a dynamic in-array (End collects
 *  keep-alive edges) -- TODO confirm against new_ir_node().
 *  NOTE(review): lossy listing -- return type, `res` declaration and the
 *  final return were dropped by extraction. */
1748 new_d_End(dbg_info *db)
1751 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1752 op_End, mode_X, -1, NULL);
1753 res = optimize_node(res);
1754 IRN_VRFY_IRG(res, current_ir_graph);
1759 /* Constructs a Block with a fixed number of predecessors.
1760 Does set current_block. Can be used with automatic Phi
1761 node construction. */
/** Constructs a Block with `arity` fixed predecessors and makes it the
 *  current block, so subsequent constructors place nodes into it.
 *  While the graph is being built (phase_building) it also allocates and
 *  zeroes the per-block graph_arr used by the automatic Phi construction.
 *  Blocks with Unknown predecessors are not optimized (has_unknown).
 *  NOTE(review): lossy listing -- braces and some statements (e.g. the body
 *  setting has_unknown, the final return) were dropped by extraction. */
1763 new_d_Block(dbg_info *db, int arity, ir_node **in)
1767 int has_unknown = 0;
1769 res = new_bd_Block(db, arity, in);
1771 /* Create and initialize array for Phi-node construction. */
1772 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1773 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1774 current_ir_graph->n_loc);
1775 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; such a block must not be optimized. */
1778 for (i = arity-1; i >= 0; i--)
1779 if (get_irn_op(in[i]) == op_Unknown) {
1784 if (!has_unknown) res = optimize_node(res);
1785 current_ir_graph->current_block = res;
1787 IRN_VRFY_IRG(res, current_ir_graph);
1792 /* ***********************************************************************/
1793 /* Methods necessary for automatic Phi node creation */
1795 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1796 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1797 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1798 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1800 Call Graph: ( A ---> B == A "calls" B)
1802 get_value mature_immBlock
1810 get_r_value_internal |
1814 new_rd_Phi0 new_rd_Phi_in
1816 * *************************************************************************** */
1818 /** Creates a Phi node with 0 predecessors.
 *  Used as a placeholder while the containing block is still immature;
 *  mature_immBlock later replaces it with a proper Phi (see case 1 in
 *  get_r_value_internal below). */
1819 static INLINE ir_node *
1820 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
1824 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1825 IRN_VRFY_IRG(res, irg);
1829 /* There are two implementations of the Phi node construction. The first
1830 is faster, but does not work for blocks with more than 2 predecessors.
1831 The second works always but is slower and causes more unnecessary Phi
1833 Select the implementations by the following preprocessor flag set in
1835 #if USE_FAST_PHI_CONSTRUCTION
1837 /* This is a stack used for allocating and deallocating nodes in
1838 new_rd_Phi_in. The original implementation used the obstack
1839 to model this stack, now it is explicit. This reduces side effects.
1841 #if USE_EXPLICIT_PHI_IN_STACK
/** Allocates a fresh, empty Phi-in stack (flexible array of ir_node*).
 *  NOTE(review): lossy listing -- return type, `res` declaration, the pos
 *  initialization and return were dropped by extraction. */
1843 new_Phi_in_stack(void) {
1846 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1848 res->stack = NEW_ARR_F (ir_node *, 0);
1852 } /* new_Phi_in_stack */
/** Frees a Phi-in stack: its flexible array and (presumably, in a dropped
 *  line) the struct itself. */
1855 free_Phi_in_stack(Phi_in_stack *s) {
1856 DEL_ARR_F(s->stack);
1858 } /* free_Phi_in_stack */
/** Returns a discarded Phi node to the current graph's Phi-in stack for
 *  later reuse.  If the stack's array is full (pos == length) the node is
 *  appended; otherwise it overwrites the slot at pos.  pos is then bumped. */
1861 free_to_Phi_in_stack(ir_node *phi) {
1862 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1863 current_ir_graph->Phi_in_stack->pos)
1864 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1866 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1868 (current_ir_graph->Phi_in_stack->pos)++;
1869 } /* free_to_Phi_in_stack */
/** Obtains a Phi node: pops a recycled one from the Phi-in stack if
 *  available, otherwise allocates a fresh node.  A recycled node is
 *  re-initialized with a new in-array (allocated on the obstack, so the
 *  old one is simply abandoned -- see the original author's comment).
 *  NOTE(review): lossy listing -- the if/else structure, several
 *  re-initialization statements and the return were dropped.  Also `db`
 *  is used at line 1881 but is not among the visible parameters;
 *  presumably a dbg_info parameter was lost in extraction -- verify
 *  against the full file. */
1871 static INLINE ir_node *
1872 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1873 int arity, ir_node **in) {
1875 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1876 int pos = current_ir_graph->Phi_in_stack->pos;
1880 /* We need to allocate a new node */
1881 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1882 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1884 /* reuse the old node and initialize it again. */
1887 assert (res->kind == k_ir_node);
1888 assert (res->op == op_Phi);
1892 assert (arity >= 0);
1893 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1894 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1896 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1898 (current_ir_graph->Phi_in_stack->pos)--;
1901 } /* alloc_or_pop_from_Phi_in_stack */
1902 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1905 * Creates a Phi node with a given, fixed array **in of predecessors.
1906 * If the Phi node is unnecessary, as the same value reaches the block
1907 * through all control flow paths, it is eliminated and the value
1908 * returned directly. This constructor is only intended for use in
1909 * the automatic Phi node generation triggered by get_value or mature.
1910 * The implementation is quite tricky and depends on the fact, that
1911 * the nodes are allocated on a stack:
1912 * The in array contains predecessors and NULLs. The NULLs appear,
1913 * if get_r_value_internal, that computed the predecessors, reached
1914 * the same block on two paths. In this case the same value reaches
1915 * this block on both paths, there is no definition in between. We need
1916 not allocate a Phi where these paths merge, but we have to communicate
1917 * this fact to the caller. This happens by returning a pointer to the
1918 * node the caller _will_ allocate. (Yes, we predict the address. We can
1919 * do so because the nodes are allocated on the obstack.) The caller then
1920 * finds a pointer to itself and, when this routine is called again,
1921 * eliminates itself.
/** Fast-variant Phi constructor (USE_FAST_PHI_CONSTRUCTION path).
 *  Builds a Phi from `in` (which may contain NULLs, see below); if at most
 *  one real predecessor remains, the Phi is deallocated again and the single
 *  known value is returned instead.  See the large comment above for the
 *  address-prediction trick this relies on.
 *  NOTE(review): lossy listing -- braces, the NULL-replacement loop body,
 *  the break/known assignments and the return were dropped by extraction. */
1923 static INLINE ir_node *
1924 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1927 ir_node *res, *known;
1929 /* Allocate a new node on the obstack. This can return a node to
1930 which some of the pointers in the in-array already point.
1931 Attention: the constructor copies the in array, i.e., the later
1932 changes to the array in this routine do not affect the
1933 constructed node! If the in array contains NULLs, there will be
1934 missing predecessors in the returned node. Is this a possible
1935 internal state of the Phi node generation? */
1936 #if USE_EXPLICIT_PHI_IN_STACK
1937 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1939 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1940 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1943 /* The in-array can contain NULLs. These were returned by
1944 get_r_value_internal if it reached the same block/definition on a
1945 second path. The NULLs are replaced by the node itself to
1946 simplify the test in the next loop. */
1947 for (i = 0; i < ins; ++i) {
1952 /* This loop checks whether the Phi has more than one predecessor.
1953 If so, it is a real Phi node and we break the loop. Else the Phi
1954 node merges the same definition on several paths and therefore is
1956 for (i = 0; i < ins; ++i) {
1957 if (in[i] == res || in[i] == known)
1966 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1968 #if USE_EXPLICIT_PHI_IN_STACK
1969 free_to_Phi_in_stack(res);
/* Without the explicit stack: undo the obstack allocation directly. */
1971 edges_node_deleted(res, current_ir_graph);
1972 obstack_free(current_ir_graph->obst, res);
1976 res = optimize_node (res);
1977 IRN_VRFY_IRG(res, irg);
1980 /* return the pointer to the Phi node. This node might be deallocated! */
1982 } /* new_rd_Phi_in */
1985 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1988 * Allocates and returns this node. The routine called to allocate the
1989 * node might optimize it away and return a real value, or even a pointer
1990 * to a deallocated Phi node on top of the obstack!
1991 * This function is called with an in-array of proper size.
/** Fast-variant phi_merge: collects, for local variable `pos`, the reaching
 *  definition from every predecessor block into nin[] and builds the Phi
 *  via new_rd_Phi_in.  The result is cached in the block's graph_arr unless
 *  a value was already recorded there (see the mature/set_value scenario in
 *  the comment below).
 *  NOTE(review): lossy listing -- return type, `i` declaration, braces and
 *  the final return were dropped by extraction. */
1994 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1996 ir_node *prevBlock, *res;
1999 /* This loop goes to all predecessor blocks of the block the Phi node is in
2000 and there finds the operands of the Phi node by calling
2001 get_r_value_internal. */
2002 for (i = 1; i <= ins; ++i) {
2003 assert (block->in[i]);
2004 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2006 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2009 /* After collecting all predecessors into the array nin a new Phi node
2010 with these predecessors is created. This constructor contains an
2011 optimization: If all predecessors of the Phi node are identical it
2012 returns the only operand instead of a new Phi node. If the value
2013 passes two different control flow edges without being defined, and
2014 this is the second path treated, a pointer to the node that will be
2015 allocated for the first path (recursion) is returned. We already
2016 know the address of this node, as it is the next node to be allocated
2017 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2018 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2020 /* Now we know the value for "pos" and can enter it in the array with
2021 all known local variables. Attention: this might be a pointer to
2022 a node, that later will be allocated!!! See new_rd_Phi_in().
2023 If this is called in mature, after some set_value() in the same block,
2024 the proper value must not be overwritten:
2026 get_value (makes Phi0, puts it into graph_arr)
2027 set_value (overwrites Phi0 in graph_arr)
2028 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2031 if (!block->attr.block.graph_arr[pos]) {
2032 block->attr.block.graph_arr[pos] = res;
2034 /* printf(" value already computed by %s\n",
2035 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2042 * This function returns the last definition of a variable. In case
2043 * this variable was last defined in a previous block, Phi nodes are
2044 * inserted. If the part of the firm graph containing the definition
2045 * is not yet constructed, a dummy Phi node is returned.
/** Fast-variant get_r_value_internal: returns the reaching definition of
 *  local variable `pos` in `block` (mode `mode`), inserting Phi/Phi0 nodes
 *  as needed.  The four cases are described exhaustively in the original
 *  comment below.
 *  NOTE(review): lossy listing -- return type, `res` declaration, braces
 *  and the final return were dropped by extraction. */
2048 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2051 /* There are 4 cases to treat.
2053 1. The block is not mature and we visit it the first time. We can not
2054 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2055 predecessors is returned. This node is added to the linked list (field
2056 "link") of the containing block to be completed when this block is
2057 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2060 2. The value is already known in this block, graph_arr[pos] is set and we
2061 visit the block the first time. We can return the value without
2062 creating any new nodes.
2064 3. The block is mature and we visit it the first time. A Phi node needs
2065 to be created (phi_merge). If the Phi is not needed, as all its
2066 operands are the same value reaching the block through different
2067 paths, it's optimized away and the value itself is returned.
2069 4. The block is mature, and we visit it the second time. Now two
2070 subcases are possible:
2071 * The value was computed completely the last time we were here. This
2072 is the case if there is no loop. We can return the proper value.
2073 * The recursion that visited this node and set the flag did not
2074 return yet. We are computing a value in a loop and need to
2075 break the recursion without knowing the result yet.
2076 @@@ strange case. Straight forward we would create a Phi before
2077 starting the computation of its predecessors. In this case we will
2078 find a Phi here in any case. The problem is that this implementation
2079 only creates a Phi after computing the predecessors, so that it is
2080 hard to compute self references of this Phi. @@@
2081 There is no simple check for the second subcase. Therefore we check
2082 for a second visit and treat all such cases as the second subcase.
2083 Anyways, the basic situation is the same: we reached a block
2084 on two paths without finding a definition of the value: No Phi
2085 nodes are needed on both paths.
2086 We return this information "Two paths, no Phi needed" by a very tricky
2087 implementation that relies on the fact that an obstack is a stack and
2088 will return a node with the same address on different allocations.
2089 Look also at phi_merge and new_rd_phi_in to understand this.
2090 @@@ Unfortunately this does not work, see testprogram
2091 three_cfpred_example.
2095 /* case 4 -- already visited. */
2096 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2098 /* visited the first time */
2099 set_irn_visited(block, get_irg_visited(current_ir_graph));
2101 /* Get the local valid value */
2102 res = block->attr.block.graph_arr[pos];
2104 /* case 2 -- If the value is actually computed, return it. */
2105 if (res) return res;
2107 if (block->attr.block.matured) { /* case 3 */
2109 /* The Phi has the same amount of ins as the corresponding block. */
2110 int ins = get_irn_arity(block);
2112 NEW_ARR_A (ir_node *, nin, ins);
2114 /* Phi merge collects the predecessors and then creates a node. */
2115 res = phi_merge (block, pos, mode, nin, ins);
2117 } else { /* case 1 */
2118 /* The block is not mature, we don't know how many in's are needed. A Phi
2119 with zero predecessors is created. Such a Phi node is called Phi0
2120 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2121 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2123 The Phi0 has to remember the pos of its internal value. If the real
2124 Phi is computed, pos is used to update the array with the local
2127 res = new_rd_Phi0 (current_ir_graph, block, mode);
2128 res->attr.phi0_pos = pos;
2129 res->link = block->link;
2133 /* If we get here, the frontend missed a use-before-definition error */
2136 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2137 assert (mode->code >= irm_F && mode->code <= irm_P);
2138 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2139 tarval_mode_null[mode->code]);
2142 /* The local valid value is available now. */
2143 block->attr.block.graph_arr[pos] = res;
2146 } /* get_r_value_internal */
2151 it starts the recursion. This causes an Id at the entry of
2152 every block that has no definition of the value! **/
2154 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy implementations: the general (slow) Phi construction needs no
 * explicit Phi-in stack, but the interface must still exist. */
2156 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2157 void free_Phi_in_stack(Phi_in_stack *s) { }
/** General-variant Phi constructor.  Like the fast variant but additionally
 *  receives `phi0`, the placeholder Phi0 this Phi will replace, so that
 *  self-references through the Phi0 can be folded right away.  Bad inputs
 *  are ignored when deciding whether the Phi is really needed.  If only one
 *  real predecessor `known` remains, the freshly allocated node is freed
 *  again and `known` (possibly re-optimized) is returned instead.
 *  NOTE(review): lossy listing -- braces, the `known` bookkeeping, the
 *  undefined-value branch and the return were dropped by extraction. */
2160 static INLINE ir_node *
2161 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2162 ir_node **in, int ins, ir_node *phi0)
2165 ir_node *res, *known;
2167 /* Allocate a new node on the obstack. The allocation copies the in
2169 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2170 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2172 /* This loop checks whether the Phi has more than one predecessor.
2173 If so, it is a real Phi node and we break the loop. Else the
2174 Phi node merges the same definition on several paths and therefore
2175 is not needed. Don't consider Bad nodes! */
2177 for (i=0; i < ins; ++i)
2181 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2183 /* Optimize self referencing Phis: We can't detect them yet properly, as
2184 they still refer to the Phi0 they will replace. So replace right now. */
2185 if (phi0 && in[i] == phi0) in[i] = res;
2187 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2195 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2198 edges_node_deleted(res, current_ir_graph);
2199 obstack_free (current_ir_graph->obst, res);
2200 if (is_Phi(known)) {
2201 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2202 order, an enclosing Phi node may become superfluous. */
2203 res = optimize_in_place_2(known);
2205 exchange(known, res);
2211 /* An undefined value, e.g., in unreachable code. */
2215 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2216 IRN_VRFY_IRG(res, irg);
2217 /* Memory Phis in endless loops must be kept alive.
2218 As we can't distinguish these easily we keep all of them alive. */
2219 if ((res->op == op_Phi) && (mode == mode_M))
2220 add_End_keepalive(get_irg_end(irg), res);
2224 } /* new_rd_Phi_in */
2227 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2229 #if PRECISE_EXC_CONTEXT
2231 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2234 * Construct a new frag_array for node n.
2235 * Copy the content from the current graph_arr of the corresponding block:
2236 * this is the current state.
2237 * Set ProjM(n) as current memory state.
2238 * Further the last entry in frag_arr of current block points to n. This
2239 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/** Builds a fresh frag_arr for fragile op `n`: a copy of the current
 *  block's graph_arr with slot 0 replaced by the exception-memory Proj of
 *  `n` (Call and CopyB use their specific M_except proj numbers; all other
 *  fragile ops share the same proj number, as the assert verifies).
 *  Optimization is temporarily disabled while the Proj is created.  The
 *  last graph_arr slot is pointed at `n`, chaining the block's frag ops.
 *  NOTE(review): lossy listing -- declarations of arr/opt, the else branch
 *  structure, re-enabling the optimization and the return were dropped. */
2241 static INLINE ir_node ** new_frag_arr(ir_node *n)
2246 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2247 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2248 sizeof(ir_node *)*current_ir_graph->n_loc);
2250 /* turn off optimization before allocating Proj nodes, as res isn't
2252 opt = get_opt_optimize(); set_optimize(0);
2253 /* Here we rely on the fact that all frag ops have Memory as first result! */
2254 if (get_irn_op(n) == op_Call)
2255 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2256 else if (get_irn_op(n) == op_CopyB)
2257 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2259 assert((pn_Quot_M == pn_DivMod_M) &&
2260 (pn_Quot_M == pn_Div_M) &&
2261 (pn_Quot_M == pn_Mod_M) &&
2262 (pn_Quot_M == pn_Load_M) &&
2263 (pn_Quot_M == pn_Store_M) &&
2264 (pn_Quot_M == pn_Alloc_M) &&
2265 (pn_Quot_M == pn_Bound_M));
2266 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2270 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2272 } /* new_frag_arr */
2275 * Returns the frag_arr from a node.
/** Returns the frag_arr stored in the opcode-specific attribute of a
 *  fragile op `n`.
 *  NOTE(review): the `case` labels (iro_Call, iro_Alloc, iro_Load,
 *  iro_Store, default/others -- TODO confirm) were dropped by the lossy
 *  extraction; only the return statements remain visible. */
2277 static INLINE ir_node **get_frag_arr(ir_node *n) {
2278 switch (get_irn_opcode(n)) {
2280 return n->attr.call.exc.frag_arr;
2282 return n->attr.alloc.exc.frag_arr;
2284 return n->attr.load.exc.frag_arr;
2286 return n->attr.store.exc.frag_arr;
2288 return n->attr.except.frag_arr;
2290 } /* get_frag_arr */
2293 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2295 if (!frag_arr[pos]) frag_arr[pos] = val;
2296 if (frag_arr[current_ir_graph->n_loc - 1]) {
2297 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2298 assert(arr != frag_arr && "Endless recursion detected");
2299 set_frag_value(arr, pos, val);
2304 for (i = 0; i < 1000; ++i) {
2305 if (!frag_arr[pos]) {
2306 frag_arr[pos] = val;
2308 if (frag_arr[current_ir_graph->n_loc - 1]) {
2309 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2315 assert(0 && "potential endless recursion");
2317 } /* set_frag_value */
/** Like get_r_value_internal, but consults the frag_arr of the fragile
 *  control-flow op `cfOp` first, so that values are correct along the
 *  exceptional edge.  If the block recorded a later set_value() a Phi
 *  (or Phi0 for an immature block) is built instead; the result is then
 *  propagated into all still-empty frag_arrs via set_frag_value.
 *  NOTE(review): lossy listing -- return type, local declarations, braces
 *  and some control-flow lines were dropped by extraction. */
2320 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2324 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2326 frag_arr = get_frag_arr(cfOp);
2327 res = frag_arr[pos];
2329 if (block->attr.block.graph_arr[pos]) {
2330 /* There was a set_value() after the cfOp and no get_value before that
2331 set_value(). We must build a Phi node now. */
2332 if (block->attr.block.matured) {
2333 int ins = get_irn_arity(block);
2335 NEW_ARR_A (ir_node *, nin, ins);
2336 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, linked for maturing. */
2338 res = new_rd_Phi0 (current_ir_graph, block, mode);
2339 res->attr.phi0_pos = pos;
2340 res->link = block->link;
2344 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2345 but this should be better: (remove comment if this works) */
2346 /* It's a Phi, we can write this into all graph_arrs with NULL */
2347 set_frag_value(block->attr.block.graph_arr, pos, res);
2349 res = get_r_value_internal(block, pos, mode);
2350 set_frag_value(block->attr.block.graph_arr, pos, res);
2354 } /* get_r_frag_value_internal */
2355 #endif /* PRECISE_EXC_CONTEXT */
2358 * Computes the predecessors for the real phi node, and then
2359 * allocates and returns this node. The routine called to allocate the
2360 * node might optimize it away and return a real value.
2361 * This function must be called with an in-array of proper size.
/** General-variant phi_merge: computes the predecessors for the real Phi
 *  node at (block, pos, mode) and builds it via new_rd_Phi_in.  Differences
 *  to the fast variant: a Phi0 placeholder is installed up front to break
 *  recursions, Bad predecessors are handled, and the exceptional-edge
 *  values of fragile ops are fetched via get_r_frag_value_internal.
 *  NOTE(review): lossy listing -- return type, `i` declaration, braces,
 *  `continue` statements, #endif lines and the final return were dropped
 *  by extraction. */
2364 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2366 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2369 /* If this block has no value at pos create a Phi0 and remember it
2370 in graph_arr to break recursions.
2371 Else we may not set graph_arr, because a later value is remembered there. */
2373 if (!block->attr.block.graph_arr[pos]) {
2374 if (block == get_irg_start_block(current_ir_graph)) {
2375 /* Collapsing to Bad tarvals is no good idea.
2376 So we call a user-supplied routine here that deals with this case as
2377 appropriate for the given language. Sadly the only help we can give
2378 here is the position.
2380 Even if all variables are defined before use, it can happen that
2381 we get to the start block, if a Cond has been replaced by a tuple
2382 (bad, jmp). In this case we call the function needlessly, eventually
2383 generating a non-existent error.
2384 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2387 if (default_initialize_local_variable) {
2388 ir_node *rem = get_cur_block();
2390 set_cur_block(block);
2391 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* No language callback registered: fall back to tarval_bad. */
2395 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2396 /* We don't need to care about exception ops in the start block.
2397 There are none by definition. */
2398 return block->attr.block.graph_arr[pos];
/* Not the start block: install a Phi0 placeholder to break recursion. */
2400 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2401 block->attr.block.graph_arr[pos] = phi0;
2402 #if PRECISE_EXC_CONTEXT
2403 if (get_opt_precise_exc_context()) {
2404 /* Set graph_arr for fragile ops. Also here we should break recursion.
2405 We could choose a cyclic path through a cfop. But the recursion would
2406 break at some point. */
2407 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2413 /* This loop goes to all predecessor blocks of the block the Phi node
2414 is in and there finds the operands of the Phi node by calling
2415 get_r_value_internal. */
2416 for (i = 1; i <= ins; ++i) {
2417 prevCfOp = skip_Proj(block->in[i]);
2419 if (is_Bad(prevCfOp)) {
2420 /* In case a Cond has been optimized we would get right to the start block
2421 with an invalid definition. */
2422 nin[i-1] = new_Bad();
2425 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2427 if (!is_Bad(prevBlock)) {
2428 #if PRECISE_EXC_CONTEXT
2429 if (get_opt_precise_exc_context() &&
2430 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): the call below is evaluated twice -- once inside the
   assert -- so it runs once more in debug builds; harmless only if the
   function is idempotent, which it appears to be (pure lookup/merge). */
2431 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2432 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2435 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2437 nin[i-1] = new_Bad();
2441 /* We want to pass the Phi0 node to the constructor: this finds additional
2442 optimization possibilities.
2443 The Phi0 node either is allocated in this function, or it comes from
2444 a former call to get_r_value_internal. In this case we may not yet
2445 exchange phi0, as this is done in mature_immBlock. */
2447 phi0_all = block->attr.block.graph_arr[pos];
2448 if (!((get_irn_op(phi0_all) == op_Phi) &&
2449 (get_irn_arity(phi0_all) == 0) &&
2450 (get_nodes_block(phi0_all) == block)))
2456 /* After collecting all predecessors into the array nin a new Phi node
2457 with these predecessors is created. This constructor contains an
2458 optimization: If all predecessors of the Phi node are identical it
2459 returns the only operand instead of a new Phi node. */
2460 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2462 /* In case we allocated a Phi0 node at the beginning of this procedure,
2463 we need to exchange this Phi0 with the real Phi. */
2465 exchange(phi0, res);
2466 block->attr.block.graph_arr[pos] = res;
2467 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2468 only an optimization. */
 * This function returns the last definition of a variable. In case
 * this variable was last defined in a previous block, Phi nodes are
 * inserted. If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block  the block in which the definition is requested
 * @param pos    slot of the variable in the block's local value array
 * @param mode   mode of the requested value
get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
	/* There are 4 cases to treat.

	   1. The block is not mature and we visit it the first time. We can not
	      create a proper Phi node, therefore a Phi0, i.e., a Phi without
	      predecessors is returned. This node is added to the linked list (field
	      "link") of the containing block to be completed when this block is
	      matured. (Completion will add a new Phi and turn the Phi0 into an Id

	   2. The value is already known in this block, graph_arr[pos] is set and we
	      visit the block the first time. We can return the value without
	      creating any new nodes.

	   3. The block is mature and we visit it the first time. A Phi node needs
	      to be created (phi_merge). If the Phi is not needed, as all it's
	      operands are the same value reaching the block through different
	      paths, it's optimized away and the value itself is returned.

	   4. The block is mature, and we visit it the second time. Now two
	      subcases are possible:
	      * The value was computed completely the last time we were here. This
	        is the case if there is no loop. We can return the proper value.
	      * The recursion that visited this node and set the flag did not
	        return yet. We are computing a value in a loop and need to
	        break the recursion. This case only happens if we visited
	        the same block with phi_merge before, which inserted a Phi0.
	        So we return the Phi0.

	/* case 4 -- already visited. */
	if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
		/* As phi_merge allocates a Phi0 this value is always defined. Here
		   is the critical difference of the two algorithms. */
		assert(block->attr.block.graph_arr[pos]);
		return block->attr.block.graph_arr[pos];

	/* visited the first time */
	set_irn_visited(block, get_irg_visited(current_ir_graph));

	/* Get the local valid value */
	res = block->attr.block.graph_arr[pos];

	/* case 2 -- If the value is actually computed, return it. */
	if (res) { return res; };

	if (block->attr.block.matured) { /* case 3 */

		/* The Phi has the same amount of ins as the corresponding block. */
		int ins = get_irn_arity(block);
		NEW_ARR_A (ir_node *, nin, ins);

		/* Phi merge collects the predecessors and then creates a node. */
		res = phi_merge (block, pos, mode, nin, ins);

	} else { /* case 1 */
		/* The block is not mature, we don't know how many in's are needed. A Phi
		   with zero predecessors is created. Such a Phi node is called Phi0
		   node. The Phi0 is then added to the list of Phi0 nodes in this block
		   to be matured by mature_immBlock later.
		   The Phi0 has to remember the pos of it's internal value. If the real
		   Phi is computed, pos is used to update the array with the local
		res = new_rd_Phi0 (current_ir_graph, block, mode);
		res->attr.phi0_pos = pos;
		/* Chain the Phi0 into the block's list of pending Phi0 nodes. */
		res->link = block->link;

	/* If we get here, the frontend missed a use-before-definition error */
	printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
	assert (mode->code >= irm_F && mode->code <= irm_P);
	/* Fallback: synthesize a zero constant of the requested mode. */
	res = new_rd_Const (NULL, current_ir_graph, block, mode,
	                    get_mode_null(mode));

	/* The local valid value is available now. */
	block->attr.block.graph_arr[pos] = res;
} /* get_r_value_internal */
2569 #endif /* USE_FAST_PHI_CONSTRUCTION */
2571 /* ************************************************************************** */
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 *
 * @param block  the immature Block whose predecessor count is now final
mature_immBlock(ir_node *block)
	assert (get_irn_opcode(block) == iro_Block);
	/* @@@ should be commented in
	   assert (!get_Block_matured(block) && "Block already matured"); */

	if (!get_Block_matured(block)) {
		/* arity: block->in[0] is not counted as a predecessor entry */
		ins = ARR_LEN (block->in)-1;
		/* Fix block parameters */
		block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

		/* An array for building the Phi nodes. */
		NEW_ARR_A (ir_node *, nin, ins);

		/* Traverse a chain of Phi nodes attached to this block and mature
		for (n = block->link; n; n = next) {
			/* each phi_merge needs a fresh visited pass */
			inc_irg_visited(current_ir_graph);
			exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
		block->attr.block.matured = 1;

		/* Now, as the block is a finished firm node, we can optimize it.
		   Since other nodes have been allocated since the block was created
		   we can not free the node on the obstack. Therefore we have to call
		   Unfortunately the optimization does not change a lot, as all allocated
		   nodes refer to the unoptimized node.
		   We can call _2, as global cse has no effect on blocks. */
		block = optimize_in_place_2(block);
		IRN_VRFY_IRG(block, current_ir_graph);
} /* mature_immBlock */
/* Builds a Phi node in the current block. */
new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
	return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);

/* Constants are always placed in the start block of the current graph. */
new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
	return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2629 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2630 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2631 } /* new_d_Const_long */
2634 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2635 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2636 } /* new_d_Const_type */
/* Builds an Id node in the current block. */
new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
	return new_bd_Id(db, current_ir_graph->current_block, val, mode);

/* Builds a Proj node in the current block. */
new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
	return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);

/* Default Proj of a Cond: marks the Cond as fragmentary and records
   max_proj as its default projection number. */
new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
	assert(arg->op == op_Cond);
	arg->attr.cond.kind = fragmentary;
	arg->attr.cond.default_proj = max_proj;
	res = new_Proj(arg, mode_X, max_proj);
} /* new_d_defaultProj */

/* Non-strict Conv (last argument 0 = strict flag off). */
new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
	return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);

/* Strict Conv: same constructor as new_d_Conv with the strict flag set. */
new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
	return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
} /* new_d_strictConv */

/* Builds a Cast node in the current block. */
new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
	return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);

/* Builds a Tuple node in the current block. */
new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
	return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
 * Allocate the frag array.
 *
 * @param res         the freshly constructed node (may have been optimized/CSEd)
 * @param op          opcode res must still carry for the array to be needed
 * @param frag_store  address of the node's frag-array slot; only filled if unset
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
	if (get_opt_precise_exc_context()) {
		/* Only during graph construction, and only if the node was not
		   replaced by optimization and has no array yet. */
		if ((current_ir_graph->phase_state == phase_building) &&
		    (get_irn_op(res) == op) && /* Could be optimized away. */
		    !*frag_store) /* Could be a cse where the arr is already set. */ {
			*frag_store = new_frag_arr(res);
} /* allocate_frag_arr */
/* Builds a Quot node in the current block. The node is pinned; under
   PRECISE_EXC_CONTEXT it gets a frag array for precise exception
   contexts (see allocate_frag_arr). */
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
	res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2);
	res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */

/* Builds a DivMod node in the current block; pinned, with optional frag array. */
new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
	res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2);
	res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
} /* new_d_DivMod */

/* Builds a Div node in the current block; pinned, with optional frag array. */
new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
	res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2);
	res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */

/* Builds a Mod node in the current block; pinned, with optional frag array. */
new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
	res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2);
	res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* --- d-constructors: each builds a node in the current block by forwarding
   to the matching new_bd_/new_rd_ constructor. Fragile operations (Call,
   Load, Store, Alloc, CopyB, Bound) additionally receive a frag array when
   PRECISE_EXC_CONTEXT is enabled; SymConst nodes go to the start block. --- */
new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
	return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);

new_d_Jmp(dbg_info *db) {
	return new_bd_Jmp(db, current_ir_graph->current_block);

new_d_IJmp(dbg_info *db, ir_node *tgt) {
	return new_bd_IJmp(db, current_ir_graph->current_block, tgt);

new_d_Cond(dbg_info *db, ir_node *c) {
	return new_bd_Cond(db, current_ir_graph->current_block, c);

new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
	res = new_bd_Call(db, current_ir_graph->current_block,
	                  store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */

new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
	return new_bd_Return(db, current_ir_graph->current_block,
} /* new_d_Return */

new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
	res = new_bd_Load(db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */

new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
	res = new_bd_Store(db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */

new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
	res = new_bd_Alloc(db, current_ir_graph->current_block,
	                   store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */

new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
           ir_node *size, ir_type *free_type, where_alloc where)
	return new_bd_Free(db, current_ir_graph->current_block,
	                   store, ptr, size, free_type, where);

/* Sel without array indices (entity access). */
new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
	/* GL: objptr was called frame before. Frame was a bad choice for the name
	   as the operand could as well be a pointer to a dynamic object. */
	return new_bd_Sel(db, current_ir_graph->current_block,
	                  store, objptr, 0, NULL, ent);
} /* new_d_simpleSel */

new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel)
	return new_bd_Sel(db, current_ir_graph->current_block,
	                  store, objptr, n_index, index, sel);

/* SymConst nodes are built in the start block. */
new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
	return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
} /* new_d_SymConst_type */

/* SymConst without explicit type: uses firm_unknown_type. */
new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind)
	return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
	                            value, kind, firm_unknown_type);
} /* new_d_SymConst */

new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
	return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);

	return _new_d_Bad();

new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
	return new_bd_Confirm(db, current_ir_graph->current_block,
} /* new_d_Confirm */

new_d_Unknown(ir_mode *m) {
	return new_bd_Unknown(m);
} /* new_d_Unknown */

new_d_CallBegin(dbg_info *db, ir_node *call) {
	return new_bd_CallBegin(db, current_ir_graph->current_block, call);
} /* new_d_CallBegin */

new_d_EndReg(dbg_info *db) {
	return new_bd_EndReg(db, current_ir_graph->current_block);
} /* new_d_EndReg */

new_d_EndExcept(dbg_info *db) {
	return new_bd_EndExcept(db, current_ir_graph->current_block);
} /* new_d_EndExcept */

new_d_Break(dbg_info *db) {
	return new_bd_Break(db, current_ir_graph->current_block);

new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
	return new_bd_Filter (db, current_ir_graph->current_block,
} /* new_d_Filter */

/* Parenthesized name keeps a possible function-like macro from expanding. */
(new_d_NoMem)(void) {
	return _new_d_NoMem();

new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
          ir_node *ir_true, ir_mode *mode) {
	return new_bd_Mux(db, current_ir_graph->current_block,
	                  sel, ir_false, ir_true, mode);

new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
	return new_bd_Psi(db, current_ir_graph->current_block,
	                  arity, conds, vals, mode);

ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
                     ir_node *dst, ir_node *src, ir_type *data_type) {
	res = new_bd_CopyB(db, current_ir_graph->current_block,
	                   store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);

new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
	return new_bd_InstOf(db, current_ir_graph->current_block,
	                     store, objptr, type);
} /* new_d_InstOf */

new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
	return new_bd_Raise(db, current_ir_graph->current_block, store, obj);

ir_node *new_d_Bound(dbg_info *db,ir_node *store,
                     ir_node *idx, ir_node *lower, ir_node *upper) {
	res = new_bd_Bound(db, current_ir_graph->current_block,
	                   store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);

new_d_Pin(dbg_info *db, ir_node *node) {
	return new_bd_Pin(db, current_ir_graph->current_block, node);
2978 /* ********************************************************************* */
2979 /* Comfortable interface with automatic Phi node construction. */
2980 /* (Uses also constructors of ?? interface, except new_Block. */
2981 /* ********************************************************************* */
/* Block construction */
/* immature Block without predecessors */
ir_node *new_d_immBlock(dbg_info *db) {
	assert(get_irg_phase_state (current_ir_graph) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
	/* new nodes are appended to this block from now on */
	current_ir_graph->current_block = res;
	res->attr.block.matured = 0;
	res->attr.block.dead = 0;
	/* res->attr.block.exc = exc_normal; */
	/* res->attr.block.handler_entry = 0; */
	res->attr.block.irg = current_ir_graph;
	res->attr.block.backedge = NULL;
	res->attr.block.in_cg = NULL;
	res->attr.block.cg_backedge = NULL;
	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
	                                       current_ir_graph->n_loc);
	/* all n_loc value slots start out undefined */
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

	/* Immature block may not be optimized! */
	IRN_VRFY_IRG(res, current_ir_graph);
} /* new_d_immBlock */
3014 new_immBlock(void) {
3015 return new_d_immBlock(NULL);
3016 } /* new_immBlock */
/* add an edge to a jmp/control flow node */
add_immBlock_pred(ir_node *block, ir_node *jmp)
	/* Predecessors may only be added while the block is immature;
	   mature_immBlock() later fixes the arity and builds the Phis. */
	if (block->attr.block.matured) {
		assert(0 && "Error: Block already matured!\n");
		/* predecessor count excludes in[0] (cf. mature_immBlock's ARR_LEN-1) */
		int n = ARR_LEN(block->in) - 1;
		assert(jmp != NULL);
		ARR_APP1(ir_node *, block->in, jmp);
		/* notify hooks that edge n of the block now points to jmp */
		hook_set_irn_n(block, n, jmp, NULL);
} /* add_immBlock_pred */
/* changing the current block */
set_cur_block(ir_node *target) {
	/* All following node constructors place their nodes in this block. */
	current_ir_graph->current_block = target;
} /* set_cur_block */
/* ************************ */
/* parameter administration */

/* get a value from the parameter array from the current block by its index */
get_d_value(dbg_info *db, int pos, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	/* fresh visited pass: get_r_value_internal uses visited marks to detect
	   blocks it has already entered (its "case 4") */
	inc_irg_visited(irg);
	/* pos is shifted by one: slot 0 of the value array holds the memory store */
	return get_r_value_internal(irg->current_block, pos + 1, mode);

/* get a value from the parameter array from the current block by its index */
get_value(int pos, ir_mode *mode) {
	return get_d_value(NULL, pos, mode);
/* set a value at position pos in the parameter array from the current block */
set_value(int pos, ir_node *value) {
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	assert(pos+1 < irg->n_loc);
	/* slot 0 is reserved for the memory store, hence the +1 shift */
	irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* Find the value number for a node in the current block.*/
find_value(ir_node *value)
	ir_node *bl = current_ir_graph->current_block;

	/* scan backwards; slot 0 (the store) is deliberately excluded */
	for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
		if (bl->attr.block.graph_arr[i] == value)
/* get the current store */
	ir_graph *irg = current_ir_graph;

	assert(get_irg_phase_state(irg) == phase_building);
	/* GL: one could call get_value instead */
	inc_irg_visited(irg);   /* fresh pass for get_r_value_internal's visited marks */
	/* slot 0 of the value array holds the memory store */
	return get_r_value_internal(irg->current_block, 0, mode_M);
/* set the current store: handles automatic Sync construction for Load nodes */
set_store(ir_node *store)
	ir_node *load, *pload, *pred, *in[2];

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

			if (is_Sync(pred)) {
				/* a Load after a Sync: move it up */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);
				pload = skip_Proj(pred);
				if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
					/* a Load after a Load: create a new Sync */
					set_Load_mem(load, get_Load_mem(pload));
					store = new_Sync(2, in);
	/* slot 0 of the current block's value array always holds the store */
	current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Mark a node so it is not removed as dead code: attach it to the
   End node's keep-alive list. */
keep_alive(ir_node *ka) {
	add_End_keepalive(get_irg_end(current_ir_graph), ka);
3137 /* --- Useful access routines --- */
3138 /* Returns the current block of the current graph. To set the current
3139 block use set_cur_block. */
3140 ir_node *get_cur_block(void) {
3141 return get_irg_current_block(current_ir_graph);
3142 } /* get_cur_block */
3144 /* Returns the frame type of the current graph */
3145 ir_type *get_cur_frame_type(void) {
3146 return get_irg_frame_type(current_ir_graph);
3147 } /* get_cur_frame_type */
3150 /* ********************************************************************* */
/* call once for each run of the library */
init_cons(uninitialized_local_variable_func_t *func) {
	/* Remember the language-dependent callback used for variables that
	   are read before being written (cf. file-head declaration). */
	default_initialize_local_variable = func;

irp_finalize_cons(void) {
	/* Finalize construction of every graph in the program representation,
	   then mark the whole program as past the construction phase. */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		irg_finalize_cons(get_irp_irg(i));
	irp->phase_state = phase_high;
} /* irp_finalize_cons */
/* --- Convenience constructors: each new_X() forwards to the corresponding
   new_d_X() constructor with a NULL dbg_info. --- */
ir_node *new_Block(int arity, ir_node **in) {
	return new_d_Block(NULL, arity, in);
ir_node *new_Start  (void) {
	return new_d_Start(NULL);
ir_node *new_End    (void) {
	return new_d_End(NULL);
ir_node *new_Jmp    (void) {
	return new_d_Jmp(NULL);
ir_node *new_IJmp   (ir_node *tgt) {
	return new_d_IJmp(NULL, tgt);
ir_node *new_Cond   (ir_node *c) {
	return new_d_Cond(NULL, c);
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
	return new_d_Return(NULL, store, arity, in);
ir_node *new_Const  (ir_mode *mode, tarval *con) {
	return new_d_Const(NULL, mode, con);
ir_node *new_Const_long(ir_mode *mode, long value)
	return new_d_Const_long(NULL, mode, value);
/* derives the mode from the given type */
ir_node *new_Const_type(tarval *con, ir_type *tp) {
	return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
ir_node *new_SymConst_type (symconst_symbol value, symconst_kind kind, ir_type *type) {
	return new_d_SymConst_type(NULL, value, kind, type);
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
	return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
	return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel    (ir_node *store, ir_node *objptr, int arity, ir_node **in,
	return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_Call   (ir_node *store, ir_node *callee, int arity, ir_node **in,
	return new_d_Call(NULL, store, callee, arity, in, tp);
ir_node *new_Add    (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub    (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus  (ir_node *op,  ir_mode *mode) {
	return new_d_Minus(NULL, op, mode);
ir_node *new_Mul    (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot   (ir_node *memop, ir_node *op1, ir_node *op2) {
	return new_d_Quot(NULL, memop, op1, op2);
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
	return new_d_DivMod(NULL, memop, op1, op2);
ir_node *new_Div    (ir_node *memop, ir_node *op1, ir_node *op2) {
	return new_d_Div(NULL, memop, op1, op2);
ir_node *new_Mod    (ir_node *memop, ir_node *op1, ir_node *op2) {
	return new_d_Mod(NULL, memop, op1, op2);
ir_node *new_Abs    (ir_node *op, ir_mode *mode) {
	return new_d_Abs(NULL, op, mode);
ir_node *new_And    (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or     (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor    (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not    (ir_node *op,                ir_mode *mode) {
	return new_d_Not(NULL, op, mode);
ir_node *new_Shl    (ir_node *op,  ir_node *k,   ir_mode *mode) {
	return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr    (ir_node *op,  ir_node *k,   ir_mode *mode) {
	return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs   (ir_node *op,  ir_node *k,   ir_mode *mode) {
	return new_d_Shrs(NULL, op, k, mode);
ir_node *new_Rot    (ir_node *op,  ir_node *k,   ir_mode *mode) {
	return new_d_Rot(NULL, op, k, mode);
ir_node *new_Carry  (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Carry(NULL, op1, op2, mode);
ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Borrow(NULL, op1, op2, mode);
ir_node *new_Cmp    (ir_node *op1, ir_node *op2) {
	return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv   (ir_node *op, ir_mode *mode) {
	return new_d_Conv(NULL, op, mode);
ir_node *new_strictConv   (ir_node *op, ir_mode *mode) {
	return new_d_strictConv(NULL, op, mode);
ir_node *new_Cast   (ir_node *op, ir_type *to_tp) {
	return new_d_Cast(NULL, op, to_tp);
ir_node *new_Phi    (int arity, ir_node **in, ir_mode *mode) {
	return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Load   (ir_node *store, ir_node *addr, ir_mode *mode) {
	return new_d_Load(NULL, store, addr, mode);
ir_node *new_Store  (ir_node *store, ir_node *addr, ir_node *val) {
	return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc  (ir_node *store, ir_node *size, ir_type *alloc_type,
                     where_alloc where) {
	return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free   (ir_node *store, ir_node *ptr, ir_node *size,
                     ir_type *free_type, where_alloc where) {
	return new_d_Free(NULL, store, ptr, size, free_type, where);
ir_node *new_Sync   (int arity, ir_node *in[]) {
	return new_d_Sync(NULL, arity, in);
ir_node *new_Proj   (ir_node *arg, ir_mode *mode, long proj) {
	return new_d_Proj(NULL, arg, mode, proj);
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
	return new_d_defaultProj(NULL, arg, max_proj);
ir_node *new_Tuple  (int arity, ir_node **in) {
	return new_d_Tuple(NULL, arity, in);
ir_node *new_Id     (ir_node *val, ir_mode *mode) {
	return new_d_Id(NULL, val, mode);
ir_node *new_Bad    (void) {
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
	return new_d_Confirm (NULL, val, bound, cmp);
ir_node *new_Unknown(ir_mode *m) {
	return new_d_Unknown(m);
ir_node *new_CallBegin (ir_node *callee) {
	return new_d_CallBegin(NULL, callee);
ir_node *new_EndReg (void) {
	return new_d_EndReg(NULL);
ir_node *new_EndExcept (void) {
	return new_d_EndExcept(NULL);
ir_node *new_Break  (void) {
	return new_d_Break(NULL);
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
	return new_d_Filter(NULL, arg, mode, proj);
ir_node *new_NoMem  (void) {
	return new_d_NoMem();
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
	return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
	return new_d_Psi(NULL, arity, conds, vals, mode);
ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
	return new_d_CopyB(NULL, store, dst, src, data_type);
ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
	return new_d_InstOf (NULL, store, objptr, ent);
ir_node *new_Raise  (ir_node *store, ir_node *obj) {
	return new_d_Raise(NULL, store, obj);
ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
	return new_d_Bound(NULL, store, idx, lower, upper);
ir_node *new_Pin(ir_node *node) {
	return new_d_Pin(NULL, node);