3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
29 #include "irgraph_t.h"
33 #include "firm_common_t.h"
39 #include "irbackedge_t.h"
41 #include "iredges_t.h"
44 #if USE_EXPLICIT_PHI_IN_STACK
45 /* A stack needed for the automatic Phi node construction in constructor
46 Phi_in. Redefinition in irgraph.c!! */
51 typedef struct Phi_in_stack Phi_in_stack;
54 /* when we need verifying */
56 # define IRN_VRFY_IRG(res, irg)
58 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
62 * Language dependent variable initialization callback.
64 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* creates a bd constructor for a binop */
67 #define NEW_BD_BINOP(instr, float_support) \
69 new_bd_##instr(dbg_info *db, ir_node *block, \
70 ir_node *op1, ir_node *op2, ir_mode *mode) \
74 ir_graph *irg = current_ir_graph; \
77 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
78 if (float_support && mode_is_float(mode) && \
79 (get_irg_fp_model(irg) & fp_exceptions)) \
81 res = optimize_node(res); \
82 IRN_VRFY_IRG(res, irg); \
86 /* creates a bd constructor for an unop */
87 #define NEW_BD_UNOP(instr, float_support) \
89 new_bd_##instr(dbg_info *db, ir_node *block, \
90 ir_node *op, ir_mode *mode) \
93 ir_graph *irg = current_ir_graph; \
94 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
95 if (float_support && mode_is_float(mode) && \
96 (get_irg_fp_model(irg) & fp_exceptions)) \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr, float_support) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2) \
111 ir_graph *irg = current_ir_graph; \
112 ir_mode *mode = get_irn_mode(op1); \
116 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
117 if (float_support && mode_is_float(mode) && \
118 (get_irg_fp_model(irg) & fp_exceptions)) \
120 res = optimize_node(res); \
121 IRN_VRFY_IRG(res, irg); \
125 /* creates a rd constructor for a binop */
126 #define NEW_RD_BINOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op1, ir_node *op2, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op1, op2, mode); \
135 current_ir_graph = rem; \
139 /* creates a rd constructor for an unop */
140 #define NEW_RD_UNOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *op, ir_mode *mode) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, op, mode); \
149 current_ir_graph = rem; \
153 /* creates a rd constructor for an divop */
154 #define NEW_RD_DIVOP(instr) \
156 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
157 ir_node *memop, ir_node *op1, ir_node *op2) \
160 ir_graph *rem = current_ir_graph; \
161 current_ir_graph = irg; \
162 res = new_bd_##instr(db, block, memop, op1, op2); \
163 current_ir_graph = rem; \
167 /* creates a d constructor for an binop */
168 #define NEW_D_BINOP(instr) \
170 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
171 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
174 /* creates a d constructor for an unop */
175 #define NEW_D_UNOP(instr) \
177 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
178 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
183 * Constructs a Block with a fixed number of predecessors.
184 * Does not set current_block. Can not be used with automatic
185 * Phi node construction.
188 new_bd_Block(dbg_info *db, int arity, ir_node **in)
/* Creates a Block node with a fixed number of predecessors.  Note the
   NULL block argument to new_ir_node: a Block has no enclosing block.
   Does not touch current_block, so it cannot participate in automatic
   Phi construction (see the comment above this function). */
191 ir_graph *irg = current_ir_graph;
193 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A block built with an explicit predecessor list is complete. */
194 set_Block_matured(res, 1);
195 set_Block_block_visited(res, 0);
197 /* res->attr.block.exc = exc_normal; */
198 /* res->attr.block.handler_entry = 0; */
199 res->attr.block.dead = 0;
200 res->attr.block.irg = irg;
/* Backedge flags live on the graph's obstack, one per predecessor. */
201 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view fields start out unset. */
202 res->attr.block.in_cg = NULL;
203 res->attr.block.cg_backedge = NULL;
204 res->attr.block.extblk = NULL;
206 IRN_VRFY_IRG(res, irg);
211 new_bd_Start(dbg_info *db, ir_node *block)
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
217 /* res->attr.start.irg = irg; */
219 IRN_VRFY_IRG(res, irg);
224 new_bd_End(dbg_info *db, ir_node *block)
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* Creates a Phi node with all predecessors given.  Only legal on a
   mature block; the Phi arity must then match the block arity. */
243 ir_graph *irg = current_ir_graph;
247 /* Don't assert that block matured: the use of this constructor is strongly
249 if ( get_Block_matured(block) )
250 assert( get_irn_arity(block) == arity );
252 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
254 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown nodes: optimizing a Phi that still
   has Unknown inputs could fold away information that a later fix-up
   pass needs, so optimization is suppressed in that case. */
256 for (i = arity-1; i >= 0; i--)
257 if (get_irn_op(in[i]) == op_Unknown) {
262 if (!has_unknown) res = optimize_node (res);
263 IRN_VRFY_IRG(res, irg);
265 /* Memory Phis in endless loops must be kept alive.
266 As we can't distinguish these easily we keep all of them alive. */
267 if ((res->op == op_Phi) && (mode == mode_M))
268 add_End_keepalive(get_irg_end(irg), res);
273 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
/* Creates a typed Const node.  NOTE: the `block` parameter is
   deliberately ignored -- constants are always placed in the start
   block of the current graph (see get_irg_start_block below). */
276 ir_graph *irg = current_ir_graph;
278 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
279 res->attr.con.tv = con;
280 set_Const_type(res, tp); /* Call method because of complex assertion. */
281 res = optimize_node (res);
/* optimize_node may CSE to an existing Const; it must carry the same type. */
282 assert(get_Const_type(res) == tp);
283 IRN_VRFY_IRG(res, irg);
286 } /* new_bd_Const_type */
289 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
291 ir_graph *irg = current_ir_graph;
293 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
297 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value)
299 ir_graph *irg = current_ir_graph;
301 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
302 } /* new_bd_Const_long */
305 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
311 res = optimize_node(res);
312 IRN_VRFY_IRG(res, irg);
317 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
/* Creates a Proj selecting result number `proj` from tuple node `arg`. */
321 ir_graph *irg = current_ir_graph;
323 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
324 res->attr.proj = proj;
/* Sanity: a Proj must have a predecessor, and that predecessor a block. */
327 assert(get_Proj_pred(res));
328 assert(get_nodes_block(get_Proj_pred(res)));
330 res = optimize_node(res);
332 IRN_VRFY_IRG(res, irg);
338 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
/* Creates the default Proj of a Cond node (the target taken when no
   case matches).  NOTE: this MUTATES the Cond argument -- it marks the
   Cond's kind as fragmentary and records max_proj as its default. */
342 ir_graph *irg = current_ir_graph;
344 assert(arg->op == op_Cond);
345 arg->attr.cond.kind = fragmentary;
346 arg->attr.cond.default_proj = max_proj;
/* Control-flow Proj: mode_X, selecting output max_proj. */
347 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
349 } /* new_bd_defaultProj */
352 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
/* Creates a Conv node converting `op` to `mode`; strict_flag requests a
   strict conversion (attribute stored on the node). */
355 ir_graph *irg = current_ir_graph;
357 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
358 res->attr.conv.strict = strict_flag;
/* Float conversions under an exception-aware FP model get extra handling. */
359 if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_exceptions)
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
370 ir_graph *irg = current_ir_graph;
372 assert(is_atomic_type(to_tp));
374 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
375 res->attr.cast.totype = to_tp;
376 res = optimize_node(res);
377 IRN_VRFY_IRG(res, irg);
382 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in)
385 ir_graph *irg = current_ir_graph;
387 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
388 res = optimize_node (res);
389 IRN_VRFY_IRG(res, irg);
393 #define supports_float 1
394 #define only_integer 0
396 NEW_BD_BINOP(Add, supports_float)
397 NEW_BD_BINOP(Sub, supports_float)
398 NEW_BD_UNOP(Minus, supports_float)
399 NEW_BD_BINOP(Mul, supports_float)
400 NEW_BD_DIVOP(Quot, supports_float)
401 NEW_BD_DIVOP(DivMod, only_integer)
402 NEW_BD_DIVOP(Div, only_integer)
403 NEW_BD_DIVOP(Mod, only_integer)
404 NEW_BD_BINOP(And, only_integer)
405 NEW_BD_BINOP(Or, only_integer)
406 NEW_BD_BINOP(Eor, only_integer)
407 NEW_BD_UNOP(Not, only_integer)
408 NEW_BD_BINOP(Shl, only_integer)
409 NEW_BD_BINOP(Shr, only_integer)
410 NEW_BD_BINOP(Shrs, only_integer)
411 NEW_BD_BINOP(Rot, only_integer)
412 NEW_BD_UNOP(Abs, supports_float)
413 NEW_BD_BINOP(Carry, only_integer)
414 NEW_BD_BINOP(Borrow, only_integer)
417 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
421 ir_graph *irg = current_ir_graph;
422 ir_mode *mode = get_irn_mode(op1);
425 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
426 if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_exceptions)
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
434 new_bd_Jmp(dbg_info *db, ir_node *block)
437 ir_graph *irg = current_ir_graph;
439 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
440 res = optimize_node (res);
441 IRN_VRFY_IRG (res, irg);
446 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt)
449 ir_graph *irg = current_ir_graph;
451 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
452 res = optimize_node (res);
453 IRN_VRFY_IRG (res, irg);
455 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
461 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c)
464 ir_graph *irg = current_ir_graph;
466 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
467 res->attr.cond.kind = dense;
468 res->attr.cond.default_proj = 0;
469 res->attr.cond.pred = COND_JMP_PRED_NONE;
470 res = optimize_node (res);
471 IRN_VRFY_IRG(res, irg);
476 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
477 ir_node *callee, int arity, ir_node **in, ir_type *tp)
/* Creates a Call node.  The real in-array holds store and callee in
   slots 0 and 1 followed by the `arity` argument nodes (see the memcpy
   into &r_in[2]); r_arity = arity + 2. */
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A(ir_node *, r_in, r_arity);
488 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
490 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type placeholder). */
492 assert((get_unknown_type() == tp) || is_Method_type(tp));
493 set_Call_type(res, tp);
/* No callee analysis information yet. */
494 res->attr.call.callee_arr = NULL;
495 res = optimize_node(res);
496 IRN_VRFY_IRG(res, irg);
501 new_bd_Return (dbg_info *db, ir_node *block,
502 ir_node *store, int arity, ir_node **in)
507 ir_graph *irg = current_ir_graph;
510 NEW_ARR_A (ir_node *, r_in, r_arity);
512 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
513 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
514 res = optimize_node(res);
515 IRN_VRFY_IRG(res, irg);
520 new_bd_Load(dbg_info *db, ir_node *block,
521 ir_node *store, ir_node *adr, ir_mode *mode)
525 ir_graph *irg = current_ir_graph;
529 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
530 res->attr.load.load_mode = mode;
531 res->attr.load.volatility = volatility_non_volatile;
532 res = optimize_node(res);
533 IRN_VRFY_IRG(res, irg);
538 new_bd_Store(dbg_info *db, ir_node *block,
539 ir_node *store, ir_node *adr, ir_node *val)
543 ir_graph *irg = current_ir_graph;
548 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
549 res->attr.store.volatility = volatility_non_volatile;
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
556 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
557 ir_node *size, ir_type *alloc_type, where_alloc where)
561 ir_graph *irg = current_ir_graph;
565 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
566 res->attr.alloc.where = where;
567 res->attr.alloc.type = alloc_type;
568 res = optimize_node(res);
569 IRN_VRFY_IRG(res, irg);
574 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
575 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
579 ir_graph *irg = current_ir_graph;
584 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
585 res->attr.free.where = where;
586 res->attr.free.type = free_type;
587 res = optimize_node(res);
588 IRN_VRFY_IRG(res, irg);
593 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
594 int arity, ir_node **in, entity *ent)
599 ir_graph *irg = current_ir_graph;
601 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
604 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
607 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
609 * FIXME: Sel's can select functions which should be of mode mode_P_code.
611 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
612 res->attr.sel.ent = ent;
613 res = optimize_node(res);
614 IRN_VRFY_IRG(res, irg);
619 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
620 symconst_kind symkind, ir_type *tp) {
/* Creates a typed SymConst.  Address kinds get pointer mode; the mode
   for the remaining kinds is set on an elided path -- presumably an
   integer mode (TODO confirm against the full source). */
623 ir_graph *irg = current_ir_graph;
625 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
626 mode = mode_P_data; /* FIXME: can be mode_P_code */
630 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
632 res->attr.symc.num = symkind;
633 res->attr.symc.sym = value;
634 res->attr.symc.tp = tp;
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, irg);
639 } /* new_bd_SymConst_type */
642 new_bd_SymConst(dbg_info *db, ir_node *block, symconst_symbol value,
643 symconst_kind symkind)
645 ir_graph *irg = current_ir_graph;
647 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
649 } /* new_bd_SymConst */
652 new_bd_Sync(dbg_info *db, ir_node *block)
655 ir_graph *irg = current_ir_graph;
657 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
658 /* no need to call optimize node here, Sync are always created with no predecessors */
659 IRN_VRFY_IRG(res, irg);
664 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
666 ir_node *in[2], *res;
667 ir_graph *irg = current_ir_graph;
671 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
672 res->attr.confirm_cmp = cmp;
673 res = optimize_node (res);
674 IRN_VRFY_IRG(res, irg);
678 /* this function is often called with current_ir_graph unset */
680 new_bd_Unknown(ir_mode *m)
/* Creates an Unknown node of mode m.  Like Const, it is pinned to the
   start block of the current graph regardless of current_block. */
683 ir_graph *irg = current_ir_graph;
685 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
686 res = optimize_node(res);
688 } /* new_bd_Unknown */
691 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call)
695 ir_graph *irg = current_ir_graph;
697 in[0] = get_Call_ptr(call);
698 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
699 /* res->attr.callbegin.irg = irg; */
700 res->attr.callbegin.call = call;
701 res = optimize_node(res);
702 IRN_VRFY_IRG(res, irg);
704 } /* new_bd_CallBegin */
707 new_bd_EndReg(dbg_info *db, ir_node *block)
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
713 set_irg_end_reg(irg, res);
714 IRN_VRFY_IRG(res, irg);
716 } /* new_bd_EndReg */
719 new_bd_EndExcept(dbg_info *db, ir_node *block)
722 ir_graph *irg = current_ir_graph;
724 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
725 set_irg_end_except(irg, res);
726 IRN_VRFY_IRG (res, irg);
728 } /* new_bd_EndExcept */
731 new_bd_Break(dbg_info *db, ir_node *block)
734 ir_graph *irg = current_ir_graph;
736 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
737 res = optimize_node(res);
738 IRN_VRFY_IRG(res, irg);
743 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
747 ir_graph *irg = current_ir_graph;
749 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
750 res->attr.filter.proj = proj;
751 res->attr.filter.in_cg = NULL;
752 res->attr.filter.backedge = NULL;
755 assert(get_Proj_pred(res));
756 assert(get_nodes_block(get_Proj_pred(res)));
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
761 } /* new_bd_Filter */
764 new_bd_Mux(dbg_info *db, ir_node *block,
765 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
769 ir_graph *irg = current_ir_graph;
775 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
778 res = optimize_node(res);
779 IRN_VRFY_IRG(res, irg);
784 new_bd_Psi(dbg_info *db, ir_node *block,
785 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
789 ir_graph *irg = current_ir_graph;
792 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
794 for (i = 0; i < arity; ++i) {
796 in[2 * i + 1] = vals[i];
800 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
803 res = optimize_node(res);
804 IRN_VRFY_IRG(res, irg);
809 new_bd_CopyB(dbg_info *db, ir_node *block,
810 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
814 ir_graph *irg = current_ir_graph;
820 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
821 res->attr.copyb.data_type = data_type;
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
828 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
829 ir_node *objptr, ir_type *type)
833 ir_graph *irg = current_ir_graph;
837 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
838 res->attr.instof.type = type;
839 res = optimize_node(res);
840 IRN_VRFY_IRG(res, irg);
842 } /* new_bd_InstOf */
845 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
849 ir_graph *irg = current_ir_graph;
853 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
854 res = optimize_node(res);
855 IRN_VRFY_IRG(res, irg);
860 new_bd_Bound(dbg_info *db, ir_node *block,
861 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
865 ir_graph *irg = current_ir_graph;
871 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
872 res = optimize_node(res);
873 IRN_VRFY_IRG(res, irg);
878 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node)
881 ir_graph *irg = current_ir_graph;
883 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
884 res = optimize_node(res);
885 IRN_VRFY_IRG(res, irg);
889 /* --------------------------------------------- */
890 /* private interfaces, for professional use only */
891 /* --------------------------------------------- */
893 /* Constructs a Block with a fixed number of predecessors.
894 Does not set current_block. Can not be used with automatic
895 Phi node construction. */
897 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in)
899 ir_graph *rem = current_ir_graph;
902 current_ir_graph = irg;
903 res = new_bd_Block(db, arity, in);
904 current_ir_graph = rem;
910 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
912 ir_graph *rem = current_ir_graph;
915 current_ir_graph = irg;
916 res = new_bd_Start(db, block);
917 current_ir_graph = rem;
923 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
926 ir_graph *rem = current_ir_graph;
928 current_ir_graph = rem;
929 res = new_bd_End(db, block);
930 current_ir_graph = rem;
935 /* Creates a Phi node with all predecessors. Calling this constructor
936 is only allowed if the corresponding block is mature. */
938 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
941 ir_graph *rem = current_ir_graph;
943 current_ir_graph = irg;
944 res = new_bd_Phi(db, block,arity, in, mode);
945 current_ir_graph = rem;
951 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
954 ir_graph *rem = current_ir_graph;
956 current_ir_graph = irg;
957 res = new_bd_Const_type(db, block, mode, con, tp);
958 current_ir_graph = rem;
961 } /* new_rd_Const_type */
964 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
967 ir_graph *rem = current_ir_graph;
969 current_ir_graph = irg;
970 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
971 current_ir_graph = rem;
977 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
979 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
980 } /* new_rd_Const_long */
983 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
986 ir_graph *rem = current_ir_graph;
988 current_ir_graph = irg;
989 res = new_bd_Id(db, block, val, mode);
990 current_ir_graph = rem;
996 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1000 ir_graph *rem = current_ir_graph;
1002 current_ir_graph = irg;
1003 res = new_bd_Proj(db, block, arg, mode, proj);
1004 current_ir_graph = rem;
1010 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1014 ir_graph *rem = current_ir_graph;
1016 current_ir_graph = irg;
1017 res = new_bd_defaultProj(db, block, arg, max_proj);
1018 current_ir_graph = rem;
1021 } /* new_rd_defaultProj */
1024 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1027 ir_graph *rem = current_ir_graph;
1029 current_ir_graph = irg;
1030 res = new_bd_Conv(db, block, op, mode, 0);
1031 current_ir_graph = rem;
1037 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1040 ir_graph *rem = current_ir_graph;
1042 current_ir_graph = irg;
1043 res = new_bd_Cast(db, block, op, to_tp);
1044 current_ir_graph = rem;
1050 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1053 ir_graph *rem = current_ir_graph;
1055 current_ir_graph = irg;
1056 res = new_bd_Tuple(db, block, arity, in);
1057 current_ir_graph = rem;
1060 } /* new_rd_Tuple */
1067 NEW_RD_DIVOP(DivMod)
1080 NEW_RD_BINOP(Borrow)
1083 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1084 ir_node *op1, ir_node *op2)
1087 ir_graph *rem = current_ir_graph;
1089 current_ir_graph = irg;
1090 res = new_bd_Cmp(db, block, op1, op2);
1091 current_ir_graph = rem;
1097 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block)
1100 ir_graph *rem = current_ir_graph;
1102 current_ir_graph = irg;
1103 res = new_bd_Jmp(db, block);
1104 current_ir_graph = rem;
1110 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1113 ir_graph *rem = current_ir_graph;
1115 current_ir_graph = irg;
1116 res = new_bd_IJmp(db, block, tgt);
1117 current_ir_graph = rem;
1123 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1126 ir_graph *rem = current_ir_graph;
1128 current_ir_graph = irg;
1129 res = new_bd_Cond(db, block, c);
1130 current_ir_graph = rem;
1136 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1137 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1140 ir_graph *rem = current_ir_graph;
1142 current_ir_graph = irg;
1143 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1144 current_ir_graph = rem;
1150 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1151 ir_node *store, int arity, ir_node **in)
1154 ir_graph *rem = current_ir_graph;
1156 current_ir_graph = irg;
1157 res = new_bd_Return(db, block, store, arity, in);
1158 current_ir_graph = rem;
1161 } /* new_rd_Return */
1164 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1165 ir_node *store, ir_node *adr, ir_mode *mode)
1168 ir_graph *rem = current_ir_graph;
1170 current_ir_graph = irg;
1171 res = new_bd_Load(db, block, store, adr, mode);
1172 current_ir_graph = rem;
1178 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1179 ir_node *store, ir_node *adr, ir_node *val)
1182 ir_graph *rem = current_ir_graph;
1184 current_ir_graph = irg;
1185 res = new_bd_Store(db, block, store, adr, val);
1186 current_ir_graph = rem;
1189 } /* new_rd_Store */
1192 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1193 ir_node *size, ir_type *alloc_type, where_alloc where)
1196 ir_graph *rem = current_ir_graph;
1198 current_ir_graph = irg;
1199 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1200 current_ir_graph = rem;
1203 } /* new_rd_Alloc */
1206 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1207 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1210 ir_graph *rem = current_ir_graph;
1212 current_ir_graph = irg;
1213 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1214 current_ir_graph = rem;
1220 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1221 ir_node *store, ir_node *objptr, entity *ent)
1224 ir_graph *rem = current_ir_graph;
1226 current_ir_graph = irg;
1227 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1228 current_ir_graph = rem;
1231 } /* new_rd_simpleSel */
1234 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1235 int arity, ir_node **in, entity *ent)
1238 ir_graph *rem = current_ir_graph;
1240 current_ir_graph = irg;
1241 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1242 current_ir_graph = rem;
1248 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1249 symconst_kind symkind, ir_type *tp)
1252 ir_graph *rem = current_ir_graph;
1254 current_ir_graph = irg;
1255 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1256 current_ir_graph = rem;
1259 } /* new_rd_SymConst_type */
1262 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1263 symconst_kind symkind)
1265 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1267 } /* new_rd_SymConst */
1269 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1271 symconst_symbol sym = {(ir_type *)symbol};
1272 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1273 } /* new_rd_SymConst_addr_ent */
1275 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1276 symconst_symbol sym = {(ir_type *)symbol};
1277 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1278 } /* new_rd_SymConst_addr_name */
1280 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1281 symconst_symbol sym = {symbol};
1282 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1283 } /* new_rd_SymConst_type_tag */
1285 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1286 symconst_symbol sym = {symbol};
1287 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1288 } /* new_rd_SymConst_size */
1290 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1291 symconst_symbol sym = {symbol};
1292 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1293 } /* new_rd_SymConst_align */
1296 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
/* Creates a Sync node in irg.  The Sync is built with no predecessors
   (dynamic arity) and the `arity` inputs are appended one at a time
   after the graph context has been restored. */
1299 ir_graph *rem = current_ir_graph;
1302 current_ir_graph = irg;
1303 res = new_bd_Sync(db, block);
1304 current_ir_graph = rem;
1306 for (i = 0; i < arity; ++i)
1307 add_Sync_pred(res, in[i]);
1313 new_rd_Bad(ir_graph *irg) {
1314 return get_irg_bad(irg);
1318 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1321 ir_graph *rem = current_ir_graph;
1323 current_ir_graph = irg;
1324 res = new_bd_Confirm(db, block, val, bound, cmp);
1325 current_ir_graph = rem;
1328 } /* new_rd_Confirm */
1330 /* this function is often called with current_ir_graph unset */
1332 new_rd_Unknown(ir_graph *irg, ir_mode *m)
1335 ir_graph *rem = current_ir_graph;
1337 current_ir_graph = irg;
1338 res = new_bd_Unknown(m);
1339 current_ir_graph = rem;
1342 } /* new_rd_Unknown */
1345 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1348 ir_graph *rem = current_ir_graph;
1350 current_ir_graph = irg;
1351 res = new_bd_CallBegin(db, block, call);
1352 current_ir_graph = rem;
1355 } /* new_rd_CallBegin */
1358 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
1362 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1363 set_irg_end_reg(irg, res);
1364 IRN_VRFY_IRG(res, irg);
1366 } /* new_rd_EndReg */
1369 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
1373 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1374 set_irg_end_except(irg, res);
1375 IRN_VRFY_IRG (res, irg);
1377 } /* new_rd_EndExcept */
1380 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block)
1383 ir_graph *rem = current_ir_graph;
1385 current_ir_graph = irg;
1386 res = new_bd_Break(db, block);
1387 current_ir_graph = rem;
1390 } /* new_rd_Break */
1393 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1397 ir_graph *rem = current_ir_graph;
1399 current_ir_graph = irg;
1400 res = new_bd_Filter(db, block, arg, mode, proj);
1401 current_ir_graph = rem;
1404 } /* new_rd_Filter */
1407 new_rd_NoMem(ir_graph *irg) {
1408 return get_irg_no_mem(irg);
1409 } /* new_rd_NoMem */
1412 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1413 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1416 ir_graph *rem = current_ir_graph;
1418 current_ir_graph = irg;
1419 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1420 current_ir_graph = rem;
1426 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1427 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1430 ir_graph *rem = current_ir_graph;
1432 current_ir_graph = irg;
1433 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1434 current_ir_graph = rem;
1439 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1440 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1443 ir_graph *rem = current_ir_graph;
1445 current_ir_graph = irg;
1446 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1447 current_ir_graph = rem;
1450 } /* new_rd_CopyB */
1453 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1454 ir_node *objptr, ir_type *type)
1457 ir_graph *rem = current_ir_graph;
1459 current_ir_graph = irg;
1460 res = new_bd_InstOf(db, block, store, objptr, type);
1461 current_ir_graph = rem;
1464 } /* new_rd_InstOf */
1467 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1470 ir_graph *rem = current_ir_graph;
1472 current_ir_graph = irg;
1473 res = new_bd_Raise(db, block, store, obj);
1474 current_ir_graph = rem;
1477 } /* new_rd_Raise */
1479 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1480 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1483 ir_graph *rem = current_ir_graph;
1485 current_ir_graph = irg;
1486 res = new_bd_Bound(db, block, store, idx, lower, upper);
1487 current_ir_graph = rem;
1490 } /* new_rd_Bound */
1492 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node)
1495 ir_graph *rem = current_ir_graph;
1497 current_ir_graph = irg;
1498 res = new_bd_Pin(db, block, node);
1499 current_ir_graph = rem;
1504 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1505 return new_rd_Block(NULL, irg, arity, in);
1507 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1508 return new_rd_Start(NULL, irg, block);
1510 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1511 return new_rd_End(NULL, irg, block);
1513 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1514 return new_rd_Jmp(NULL, irg, block);
1516 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1517 return new_rd_IJmp(NULL, irg, block, tgt);
1519 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1520 return new_rd_Cond(NULL, irg, block, c);
1522 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1523 ir_node *store, int arity, ir_node **in) {
1524 return new_rd_Return(NULL, irg, block, store, arity, in);
1526 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1527 ir_mode *mode, tarval *con) {
1528 return new_rd_Const(NULL, irg, block, mode, con);
1530 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1531 ir_mode *mode, long value) {
1532 return new_rd_Const_long(NULL, irg, block, mode, value);
1534 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1535 ir_mode *mode, tarval *con, ir_type *tp) {
1536 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1538 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1539 symconst_symbol value, symconst_kind symkind) {
1540 return new_rd_SymConst(NULL, irg, block, value, symkind);
1542 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1543 ir_node *objptr, entity *ent) {
1544 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1546 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1547 ir_node *objptr, int n_index, ir_node **index,
1549 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1551 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1552 ir_node *callee, int arity, ir_node **in,
1554 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1556 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1557 ir_node *op1, ir_node *op2, ir_mode *mode) {
1558 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1560 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1561 ir_node *op1, ir_node *op2, ir_mode *mode) {
1562 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1564 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1565 ir_node *op, ir_mode *mode) {
1566 return new_rd_Minus(NULL, irg, block, op, mode);
1568 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1569 ir_node *op1, ir_node *op2, ir_mode *mode) {
1570 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1572 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1573 ir_node *memop, ir_node *op1, ir_node *op2) {
1574 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1576 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1577 ir_node *memop, ir_node *op1, ir_node *op2) {
1578 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1580 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1581 ir_node *memop, ir_node *op1, ir_node *op2) {
1582 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1584 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1585 ir_node *memop, ir_node *op1, ir_node *op2) {
1586 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1588 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1589 ir_node *op, ir_mode *mode) {
1590 return new_rd_Abs(NULL, irg, block, op, mode);
1592 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1593 ir_node *op1, ir_node *op2, ir_mode *mode) {
1594 return new_rd_And(NULL, irg, block, op1, op2, mode);
1596 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1597 ir_node *op1, ir_node *op2, ir_mode *mode) {
1598 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1600 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1601 ir_node *op1, ir_node *op2, ir_mode *mode) {
1602 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1604 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1605 ir_node *op, ir_mode *mode) {
1606 return new_rd_Not(NULL, irg, block, op, mode);
1608 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1609 ir_node *op, ir_node *k, ir_mode *mode) {
1610 return new_rd_Shl(NULL, irg, block, op, k, mode);
1612 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1613 ir_node *op, ir_node *k, ir_mode *mode) {
1614 return new_rd_Shr(NULL, irg, block, op, k, mode);
1616 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1617 ir_node *op, ir_node *k, ir_mode *mode) {
1618 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1620 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1621 ir_node *op, ir_node *k, ir_mode *mode) {
1622 return new_rd_Rot(NULL, irg, block, op, k, mode);
1624 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1625 ir_node *op, ir_node *k, ir_mode *mode) {
1626 return new_rd_Carry(NULL, irg, block, op, k, mode);
1628 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1629 ir_node *op, ir_node *k, ir_mode *mode) {
1630 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1632 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1633 ir_node *op1, ir_node *op2) {
1634 return new_rd_Cmp(NULL, irg, block, op1, op2);
1636 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1637 ir_node *op, ir_mode *mode) {
1638 return new_rd_Conv(NULL, irg, block, op, mode);
1640 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1641 return new_rd_Cast(NULL, irg, block, op, to_tp);
1643 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1644 ir_node **in, ir_mode *mode) {
1645 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1647 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1648 ir_node *store, ir_node *adr, ir_mode *mode) {
1649 return new_rd_Load(NULL, irg, block, store, adr, mode);
1651 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1652 ir_node *store, ir_node *adr, ir_node *val) {
1653 return new_rd_Store(NULL, irg, block, store, adr, val);
1655 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1656 ir_node *size, ir_type *alloc_type, where_alloc where) {
1657 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1659 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1660 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1661 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1663 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1664 return new_rd_Sync(NULL, irg, block, arity, in);
1666 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1667 ir_mode *mode, long proj) {
1668 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1670 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1672 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1674 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1675 int arity, ir_node **in) {
1676 return new_rd_Tuple(NULL, irg, block, arity, in );
1678 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1679 ir_node *val, ir_mode *mode) {
1680 return new_rd_Id(NULL, irg, block, val, mode);
1682 ir_node *new_r_Bad (ir_graph *irg) {
1683 return new_rd_Bad(irg);
1685 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1686 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1688 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1689 return new_rd_Unknown(irg, m);
1691 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1692 return new_rd_CallBegin(NULL, irg, block, callee);
1694 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1695 return new_rd_EndReg(NULL, irg, block);
1697 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1698 return new_rd_EndExcept(NULL, irg, block);
1700 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1701 return new_rd_Break(NULL, irg, block);
1703 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1704 ir_mode *mode, long proj) {
1705 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1707 ir_node *new_r_NoMem (ir_graph *irg) {
1708 return new_rd_NoMem(irg);
1710 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1711 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1712 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1714 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1715 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1716 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1718 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1719 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1720 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1722 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1724 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1726 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1727 ir_node *store, ir_node *obj) {
1728 return new_rd_Raise(NULL, irg, block, store, obj);
1730 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1731 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1732 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1734 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1735 return new_rd_Pin(NULL, irg, block, node);
1738 /** ********************/
1739 /** public interfaces */
1740 /** construction tools */
1744 * - create a new Start node in the current block
1746 * @return s - pointer to the created Start node
1751 new_d_Start(dbg_info *db)
1755 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1756 op_Start, mode_T, 0, NULL);
1757 /* res->attr.start.irg = current_ir_graph; */
1759 res = optimize_node(res);
1760 IRN_VRFY_IRG(res, current_ir_graph);
1765 new_d_End(dbg_info *db)
1768 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1769 op_End, mode_X, -1, NULL);
1770 res = optimize_node(res);
1771 IRN_VRFY_IRG(res, current_ir_graph);
1776 /* Constructs a Block with a fixed number of predecessors.
1777 Does set current_block. Can be used with automatic Phi
1778 node construction. */
1780 new_d_Block(dbg_info *db, int arity, ir_node **in)
1784 int has_unknown = 0;
1786 res = new_bd_Block(db, arity, in);
1788 /* Create and initialize array for Phi-node construction. */
1789 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1790 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1791 current_ir_graph->n_loc);
1792 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1795 for (i = arity-1; i >= 0; i--)
1796 if (get_irn_op(in[i]) == op_Unknown) {
1801 if (!has_unknown) res = optimize_node(res);
1802 current_ir_graph->current_block = res;
1804 IRN_VRFY_IRG(res, current_ir_graph);
1809 /* ***********************************************************************/
1810 /* Methods necessary for automatic Phi node creation */
1812 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1813 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1814 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1815 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1817 Call Graph: ( A ---> B == A "calls" B)
1819 get_value mature_immBlock
1827 get_r_value_internal |
1831 new_rd_Phi0 new_rd_Phi_in
1833 * *************************************************************************** */
1835 /** Creates a Phi node with 0 predecessors. */
1836 static INLINE ir_node *
1837 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
1841 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1842 IRN_VRFY_IRG(res, irg);
1846 /* There are two implementations of the Phi node construction. The first
1847 is faster, but does not work for blocks with more than 2 predecessors.
1848 The second works always but is slower and causes more unnecessary Phi
1850 Select the implementations by the following preprocessor flag set in
1852 #if USE_FAST_PHI_CONSTRUCTION
1854 /* This is a stack used for allocating and deallocating nodes in
1855 new_rd_Phi_in. The original implementation used the obstack
1856 to model this stack, now it is explicit. This reduces side effects.
1858 #if USE_EXPLICIT_PHI_IN_STACK
1860 new_Phi_in_stack(void) {
1863 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1865 res->stack = NEW_ARR_F (ir_node *, 0);
1869 } /* new_Phi_in_stack */
1872 free_Phi_in_stack(Phi_in_stack *s) {
1873 DEL_ARR_F(s->stack);
1875 } /* free_Phi_in_stack */
1878 free_to_Phi_in_stack(ir_node *phi) {
1879 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1880 current_ir_graph->Phi_in_stack->pos)
1881 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1883 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1885 (current_ir_graph->Phi_in_stack->pos)++;
1886 } /* free_to_Phi_in_stack */
1888 static INLINE ir_node *
1889 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1890 int arity, ir_node **in) {
1892 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1893 int pos = current_ir_graph->Phi_in_stack->pos;
1897 /* We need to allocate a new node */
1898 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1899 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1901 /* reuse the old node and initialize it again. */
1904 assert (res->kind == k_ir_node);
1905 assert (res->op == op_Phi);
1909 assert (arity >= 0);
1910 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1911 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1913 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1915 (current_ir_graph->Phi_in_stack->pos)--;
1918 } /* alloc_or_pop_from_Phi_in_stack */
1919 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1922 * Creates a Phi node with a given, fixed array **in of predecessors.
1923 * If the Phi node is unnecessary, as the same value reaches the block
1924 * through all control flow paths, it is eliminated and the value
1925 * returned directly. This constructor is only intended for use in
1926 * the automatic Phi node generation triggered by get_value or mature.
1927 * The implementation is quite tricky and depends on the fact, that
1928 * the nodes are allocated on a stack:
1929 * The in array contains predecessors and NULLs. The NULLs appear,
1930 * if get_r_value_internal, that computed the predecessors, reached
1931 * the same block on two paths. In this case the same value reaches
1932 * this block on both paths, there is no definition in between. We need
1933 * not allocate a Phi where these path's merge, but we have to communicate
1934 * this fact to the caller. This happens by returning a pointer to the
1935 * node the caller _will_ allocate. (Yes, we predict the address. We can
1936 * do so because the nodes are allocated on the obstack.) The caller then
1937 * finds a pointer to itself and, when this routine is called again,
1938 * eliminates itself.
1940 static INLINE ir_node *
1941 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1944 ir_node *res, *known;
1946 /* Allocate a new node on the obstack. This can return a node to
1947 which some of the pointers in the in-array already point.
1948 Attention: the constructor copies the in array, i.e., the later
1949 changes to the array in this routine do not affect the
1950 constructed node! If the in array contains NULLs, there will be
1951 missing predecessors in the returned node. Is this a possible
1952 internal state of the Phi node generation? */
1953 #if USE_EXPLICIT_PHI_IN_STACK
1954 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1956 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1957 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1960 /* The in-array can contain NULLs. These were returned by
1961 get_r_value_internal if it reached the same block/definition on a
1962 second path. The NULLs are replaced by the node itself to
1963 simplify the test in the next loop. */
1964 for (i = 0; i < ins; ++i) {
1969 /* This loop checks whether the Phi has more than one predecessor.
1970 If so, it is a real Phi node and we break the loop. Else the Phi
1971 node merges the same definition on several paths and therefore is
1973 for (i = 0; i < ins; ++i) {
1974 if (in[i] == res || in[i] == known)
1983 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1985 #if USE_EXPLICIT_PHI_IN_STACK
1986 free_to_Phi_in_stack(res);
1988 edges_node_deleted(res, current_ir_graph);
1989 obstack_free(current_ir_graph->obst, res);
1993 res = optimize_node (res);
1994 IRN_VRFY_IRG(res, irg);
1997 /* return the pointer to the Phi node. This node might be deallocated! */
1999 } /* new_rd_Phi_in */
2002 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2005 * Allocates and returns this node. The routine called to allocate the
2006 * node might optimize it away and return a real value, or even a pointer
2007 * to a deallocated Phi node on top of the obstack!
2008 * This function is called with an in-array of proper size.
2011 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2013 ir_node *prevBlock, *res;
2016 /* This loop goes to all predecessor blocks of the block the Phi node is in
2017 and there finds the operands of the Phi node by calling
2018 get_r_value_internal. */
2019 for (i = 1; i <= ins; ++i) {
2020 assert (block->in[i]);
2021 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2023 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2026 /* After collecting all predecessors into the array nin a new Phi node
2027 with these predecessors is created. This constructor contains an
2028 optimization: If all predecessors of the Phi node are identical it
2029 returns the only operand instead of a new Phi node. If the value
2030 passes two different control flow edges without being defined, and
2031 this is the second path treated, a pointer to the node that will be
2032 allocated for the first path (recursion) is returned. We already
2033 know the address of this node, as it is the next node to be allocated
2034 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2035 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2037 /* Now we now the value for "pos" and can enter it in the array with
2038 all known local variables. Attention: this might be a pointer to
2039 a node, that later will be allocated!!! See new_rd_Phi_in().
2040 If this is called in mature, after some set_value() in the same block,
2041 the proper value must not be overwritten:
2043 get_value (makes Phi0, put's it into graph_arr)
2044 set_value (overwrites Phi0 in graph_arr)
2045 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2048 if (!block->attr.block.graph_arr[pos]) {
2049 block->attr.block.graph_arr[pos] = res;
2051 /* printf(" value already computed by %s\n",
2052 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2059 * This function returns the last definition of a variable. In case
2060 * this variable was last defined in a previous block, Phi nodes are
2061 * inserted. If the part of the firm graph containing the definition
2062 * is not yet constructed, a dummy Phi node is returned.
2065 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2068 /* There are 4 cases to treat.
2070 1. The block is not mature and we visit it the first time. We can not
2071 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2072 predecessors is returned. This node is added to the linked list (field
2073 "link") of the containing block to be completed when this block is
2074 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2077 2. The value is already known in this block, graph_arr[pos] is set and we
2078 visit the block the first time. We can return the value without
2079 creating any new nodes.
2081 3. The block is mature and we visit it the first time. A Phi node needs
2082 to be created (phi_merge). If the Phi is not needed, as all it's
2083 operands are the same value reaching the block through different
2084 paths, it's optimized away and the value itself is returned.
2086 4. The block is mature, and we visit it the second time. Now two
2087 subcases are possible:
2088 * The value was computed completely the last time we were here. This
2089 is the case if there is no loop. We can return the proper value.
2090 * The recursion that visited this node and set the flag did not
2091 return yet. We are computing a value in a loop and need to
2092 break the recursion without knowing the result yet.
2093 @@@ strange case. Straight forward we would create a Phi before
2094 starting the computation of it's predecessors. In this case we will
2095 find a Phi here in any case. The problem is that this implementation
2096 only creates a Phi after computing the predecessors, so that it is
2097 hard to compute self references of this Phi. @@@
2098 There is no simple check for the second subcase. Therefore we check
2099 for a second visit and treat all such cases as the second subcase.
2100 Anyways, the basic situation is the same: we reached a block
2101 on two paths without finding a definition of the value: No Phi
2102 nodes are needed on both paths.
2103 We return this information "Two paths, no Phi needed" by a very tricky
2104 implementation that relies on the fact that an obstack is a stack and
2105 will return a node with the same address on different allocations.
2106 Look also at phi_merge and new_rd_phi_in to understand this.
2107 @@@ Unfortunately this does not work, see testprogram
2108 three_cfpred_example.
2112 /* case 4 -- already visited. */
2113 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2115 /* visited the first time */
2116 set_irn_visited(block, get_irg_visited(current_ir_graph));
2118 /* Get the local valid value */
2119 res = block->attr.block.graph_arr[pos];
2121 /* case 2 -- If the value is actually computed, return it. */
2122 if (res) return res;
2124 if (block->attr.block.matured) { /* case 3 */
2126 /* The Phi has the same amount of ins as the corresponding block. */
2127 int ins = get_irn_arity(block);
2129 NEW_ARR_A (ir_node *, nin, ins);
2131 /* Phi merge collects the predecessors and then creates a node. */
2132 res = phi_merge (block, pos, mode, nin, ins);
2134 } else { /* case 1 */
2135 /* The block is not mature, we don't know how many in's are needed. A Phi
2136 with zero predecessors is created. Such a Phi node is called Phi0
2137 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2138 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2140 The Phi0 has to remember the pos of it's internal value. If the real
2141 Phi is computed, pos is used to update the array with the local
2144 res = new_rd_Phi0 (current_ir_graph, block, mode);
2145 res->attr.phi0_pos = pos;
2146 res->link = block->link;
2150 /* If we get here, the frontend missed a use-before-definition error */
2153 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2154 assert (mode->code >= irm_F && mode->code <= irm_P);
2155 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2156 tarval_mode_null[mode->code]);
2159 /* The local valid value is available now. */
2160 block->attr.block.graph_arr[pos] = res;
2163 } /* get_r_value_internal */
2168 it starts the recursion. This causes an Id at the entry of
2169 every block that has no definition of the value! **/
2171 #if USE_EXPLICIT_PHI_IN_STACK
2173 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2174 void free_Phi_in_stack(Phi_in_stack *s) { }
2177 static INLINE ir_node *
2178 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2179 ir_node **in, int ins, ir_node *phi0)
2182 ir_node *res, *known;
2184 /* Allocate a new node on the obstack. The allocation copies the in
2186 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2187 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2189 /* This loop checks whether the Phi has more than one predecessor.
2190 If so, it is a real Phi node and we break the loop. Else the
2191 Phi node merges the same definition on several paths and therefore
2192 is not needed. Don't consider Bad nodes! */
2194 for (i=0; i < ins; ++i)
2198 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2200 /* Optimize self referencing Phis: We can't detect them yet properly, as
2201 they still refer to the Phi0 they will replace. So replace right now. */
2202 if (phi0 && in[i] == phi0) in[i] = res;
2204 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2212 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2215 edges_node_deleted(res, current_ir_graph);
2216 obstack_free (current_ir_graph->obst, res);
2217 if (is_Phi(known)) {
2218 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2219 order, an enclosing Phi know may get superfluous. */
2220 res = optimize_in_place_2(known);
2222 exchange(known, res);
2228 /* A undefined value, e.g., in unreachable code. */
2232 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2233 IRN_VRFY_IRG(res, irg);
2234 /* Memory Phis in endless loops must be kept alive.
2235 As we can't distinguish these easily we keep all of them alive. */
2236 if ((res->op == op_Phi) && (mode == mode_M))
2237 add_End_keepalive(get_irg_end(irg), res);
2241 } /* new_rd_Phi_in */
2244 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2246 #if PRECISE_EXC_CONTEXT
2248 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2251 * Construct a new frag_array for node n.
2252 * Copy the content from the current graph_arr of the corresponding block:
2253 * this is the current state.
2254 * Set ProjM(n) as current memory state.
2255 * Further the last entry in frag_arr of current block points to n. This
2256 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2258 static INLINE ir_node ** new_frag_arr(ir_node *n)
2263 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2264 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2265 sizeof(ir_node *)*current_ir_graph->n_loc);
2267 /* turn off optimization before allocating Proj nodes, as res isn't
2269 opt = get_opt_optimize(); set_optimize(0);
2270 /* Here we rely on the fact that all frag ops have Memory as first result! */
2271 if (get_irn_op(n) == op_Call)
2272 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2273 else if (get_irn_op(n) == op_CopyB)
2274 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2276 assert((pn_Quot_M == pn_DivMod_M) &&
2277 (pn_Quot_M == pn_Div_M) &&
2278 (pn_Quot_M == pn_Mod_M) &&
2279 (pn_Quot_M == pn_Load_M) &&
2280 (pn_Quot_M == pn_Store_M) &&
2281 (pn_Quot_M == pn_Alloc_M) &&
2282 (pn_Quot_M == pn_Bound_M));
2283 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2287 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2289 } /* new_frag_arr */
2292 * Returns the frag_arr from a node.
2294 static INLINE ir_node **get_frag_arr(ir_node *n) {
2295 switch (get_irn_opcode(n)) {
2297 return n->attr.call.exc.frag_arr;
2299 return n->attr.alloc.exc.frag_arr;
2301 return n->attr.load.exc.frag_arr;
2303 return n->attr.store.exc.frag_arr;
2305 return n->attr.except.frag_arr;
2307 } /* get_frag_arr */
2310 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2312 if (!frag_arr[pos]) frag_arr[pos] = val;
2313 if (frag_arr[current_ir_graph->n_loc - 1]) {
2314 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2315 assert(arr != frag_arr && "Endless recursion detected");
2316 set_frag_value(arr, pos, val);
2321 for (i = 0; i < 1000; ++i) {
2322 if (!frag_arr[pos]) {
2323 frag_arr[pos] = val;
2325 if (frag_arr[current_ir_graph->n_loc - 1]) {
2326 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2332 assert(0 && "potential endless recursion");
2334 } /* set_frag_value */
2337 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2341 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2343 frag_arr = get_frag_arr(cfOp);
2344 res = frag_arr[pos];
2346 if (block->attr.block.graph_arr[pos]) {
2347 /* There was a set_value() after the cfOp and no get_value before that
2348 set_value(). We must build a Phi node now. */
2349 if (block->attr.block.matured) {
2350 int ins = get_irn_arity(block);
2352 NEW_ARR_A (ir_node *, nin, ins);
2353 res = phi_merge(block, pos, mode, nin, ins);
2355 res = new_rd_Phi0 (current_ir_graph, block, mode);
2356 res->attr.phi0_pos = pos;
2357 res->link = block->link;
2361 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2362 but this should be better: (remove comment if this works) */
2363 /* It's a Phi, we can write this into all graph_arrs with NULL */
2364 set_frag_value(block->attr.block.graph_arr, pos, res);
2366 res = get_r_value_internal(block, pos, mode);
2367 set_frag_value(block->attr.block.graph_arr, pos, res);
2371 } /* get_r_frag_value_internal */
2372 #endif /* PRECISE_EXC_CONTEXT */
2375 * Computes the predecessors for the real phi node, and then
2376 * allocates and returns this node. The routine called to allocate the
2377 * node might optimize it away and return a real value.
2378 * This function must be called with an in-array of proper size.
2381 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2383 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2386 /* If this block has no value at pos create a Phi0 and remember it
2387 in graph_arr to break recursions.
2388 Else we may not set graph_arr as there a later value is remembered. */
2390 if (!block->attr.block.graph_arr[pos]) {
2391 if (block == get_irg_start_block(current_ir_graph)) {
2392 /* Collapsing to Bad tarvals is no good idea.
2393 So we call a user-supplied routine here that deals with this case as
2394 appropriate for the given language. Sorrily the only help we can give
2395 here is the position.
2397 Even if all variables are defined before use, it can happen that
2398 we get to the start block, if a Cond has been replaced by a tuple
2399 (bad, jmp). In this case we call the function needlessly, eventually
2400 generating an non existent error.
2401 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2404 if (default_initialize_local_variable) {
2405 ir_node *rem = get_cur_block();
2407 set_cur_block(block);
2408 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2412 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2413 /* We don't need to care about exception ops in the start block.
2414 There are none by definition. */
2415 return block->attr.block.graph_arr[pos];
2417 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2418 block->attr.block.graph_arr[pos] = phi0;
2419 #if PRECISE_EXC_CONTEXT
2420 if (get_opt_precise_exc_context()) {
2421 /* Set graph_arr for fragile ops. Also here we should break recursion.
2422 We could choose a cyclic path through an cfop. But the recursion would
2423 break at some point. */
2424 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2430 /* This loop goes to all predecessor blocks of the block the Phi node
2431 is in and there finds the operands of the Phi node by calling
2432 get_r_value_internal. */
2433 for (i = 1; i <= ins; ++i) {
2434 prevCfOp = skip_Proj(block->in[i]);
2436 if (is_Bad(prevCfOp)) {
2437 /* In case a Cond has been optimized we would get right to the start block
2438 with an invalid definition. */
2439 nin[i-1] = new_Bad();
2442 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2444 if (!is_Bad(prevBlock)) {
2445 #if PRECISE_EXC_CONTEXT
2446 if (get_opt_precise_exc_context() &&
2447 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2448 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2449 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2452 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2454 nin[i-1] = new_Bad();
2458 /* We want to pass the Phi0 node to the constructor: this finds additional
2459 optimization possibilities.
2460 The Phi0 node either is allocated in this function, or it comes from
2461 a former call to get_r_value_internal. In this case we may not yet
2462 exchange phi0, as this is done in mature_immBlock. */
2464 phi0_all = block->attr.block.graph_arr[pos];
2465 if (!((get_irn_op(phi0_all) == op_Phi) &&
2466 (get_irn_arity(phi0_all) == 0) &&
2467 (get_nodes_block(phi0_all) == block)))
2473 /* After collecting all predecessors into the array nin a new Phi node
2474 with these predecessors is created. This constructor contains an
2475 optimization: If all predecessors of the Phi node are identical it
2476 returns the only operand instead of a new Phi node. */
2477 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2479 /* In case we allocated a Phi0 node at the beginning of this procedure,
2480 we need to exchange this Phi0 with the real Phi. */
2482 exchange(phi0, res);
2483 block->attr.block.graph_arr[pos] = res;
2484 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2485 only an optimization. */
2492 * This function returns the last definition of a variable. In case
2493 * this variable was last defined in a previous block, Phi nodes are
2494 * inserted. If the part of the firm graph containing the definition
2495 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block  the Block to look the value up in
 * @param pos    index of the variable in the block's graph_arr value array
 * @param mode   mode of the requested value
2498 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2501 /* There are 4 cases to treat.
2503 1. The block is not mature and we visit it the first time. We can not
2504 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2505 predecessors is returned. This node is added to the linked list (field
2506 "link") of the containing block to be completed when this block is
2507 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2510 2. The value is already known in this block, graph_arr[pos] is set and we
2511 visit the block the first time. We can return the value without
2512 creating any new nodes.
2514 3. The block is mature and we visit it the first time. A Phi node needs
2515 to be created (phi_merge). If the Phi is not needed, as all its
2516 operands are the same value reaching the block through different
2517 paths, it's optimized away and the value itself is returned.
2519 4. The block is mature, and we visit it the second time. Now two
2520 subcases are possible:
2521 * The value was computed completely the last time we were here. This
2522 is the case if there is no loop. We can return the proper value.
2523 * The recursion that visited this node and set the flag did not
2524 return yet. We are computing a value in a loop and need to
2525 break the recursion. This case only happens if we visited
2526 the same block with phi_merge before, which inserted a Phi0.
2527 So we return the Phi0.
2530 /* case 4 -- already visited. */
2531 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2532 /* As phi_merge allocates a Phi0 this value is always defined. Here
2533 is the critical difference of the two algorithms. */
2534 assert(block->attr.block.graph_arr[pos]);
2535 return block->attr.block.graph_arr[pos];
2538 /* visited the first time */
2539 set_irn_visited(block, get_irg_visited(current_ir_graph));
2541 /* Get the local valid value */
2542 res = block->attr.block.graph_arr[pos];
2544 /* case 2 -- If the value is actually computed, return it. */
2545 if (res) { return res; };
2547 if (block->attr.block.matured) { /* case 3 */
2549 /* The Phi has the same amount of ins as the corresponding block. */
2550 int ins = get_irn_arity(block);
2552 NEW_ARR_A (ir_node *, nin, ins);
2554 /* Phi merge collects the predecessors and then creates a node. */
2555 res = phi_merge (block, pos, mode, nin, ins);
2557 } else { /* case 1 */
2558 /* The block is not mature, we don't know how many in's are needed. A Phi
2559 with zero predecessors is created. Such a Phi node is called Phi0
2560 node. The Phi0 is then added to the list of Phi0 nodes in this block
2561 to be matured by mature_immBlock later.
2562 The Phi0 has to remember the pos of its internal value. If the real
2563 Phi is computed, pos is used to update the array with the local
2565 res = new_rd_Phi0 (current_ir_graph, block, mode);
2566 res->attr.phi0_pos = pos;
2567 res->link = block->link;
2571 /* If we get here, the frontend missed a use-before-definition error */
/* Keep construction going with a defined zero value instead of failing. */
2574 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2575 assert (mode->code >= irm_F && mode->code <= irm_P);
2576 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2577 get_mode_null(mode));
2580 /* The local valid value is available now. */
2581 block->attr.block.graph_arr[pos] = res;
2584 } /* get_r_value_internal */
2586 #endif /* USE_FAST_PHI_CONSTRUCTION */
2588 /* ************************************************************************** */
2591 * Finalize a Block node, when all control flows are known.
2592 * Acceptable parameters are only Block nodes.
 *
 * After maturing, no further predecessors may be added (see
 * add_immBlock_pred).  All Phi0 placeholders queued on the block's
 * link list are completed here via phi_merge.
2595 mature_immBlock(ir_node *block)
2601 assert (get_irn_opcode(block) == iro_Block);
2602 /* @@@ should be commented in
2603 assert (!get_Block_matured(block) && "Block already matured"); */
2605 if (!get_Block_matured(block)) {
/* Number of control-flow predecessors; block->in[0] is not counted. */
2606 ins = ARR_LEN (block->in)-1;
2607 /* Fix block parameters */
2608 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2610 /* An array for building the Phi nodes. */
2611 NEW_ARR_A (ir_node *, nin, ins);
2613 /* Traverse a chain of Phi nodes attached to this block and mature
2615 for (n = block->link; n; n=next) {
2616 inc_irg_visited(current_ir_graph);
/* Replace the Phi0 placeholder by a real Phi over all predecessors. */
2618 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2621 block->attr.block.matured = 1;
2623 /* Now, as the block is a finished firm node, we can optimize it.
2624 Since other nodes have been allocated since the block was created
2625 we can not free the node on the obstack. Therefore we have to call
2627 Unfortunately the optimization does not change a lot, as all allocated
2628 nodes refer to the unoptimized node.
2629 We can call _2, as global cse has no effect on blocks. */
2630 block = optimize_in_place_2(block);
2631 IRN_VRFY_IRG(block, current_ir_graph);
2633 } /* mature_immBlock */
/* -- Constructors with debug information (new_d_*).  Thin wrappers that -- */
/* -- build the node in the current block of current_ir_graph;           -- */
/* -- constant-like nodes are placed in the start block.                 -- */
2636 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2637 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2641 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
/* Constants always live in the start block. */
2642 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2646 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2647 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2648 } /* new_d_Const_long */
2651 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2652 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2653 } /* new_d_Const_type */
2657 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2658 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2662 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2663 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Build the default Proj of a Cond node; marks the Cond as fragmentary
   and records the default projection number on it. */
2667 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2669 assert(arg->op == op_Cond);
2670 arg->attr.cond.kind = fragmentary;
2671 arg->attr.cond.default_proj = max_proj;
2672 res = new_Proj (arg, mode_X, max_proj);
2674 } /* new_d_defaultProj */
2677 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode) {
/* last argument 0: non-strict Conv */
2678 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2682 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
/* last argument 1: strict Conv */
2683 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2684 } /* new_d_strictConv */
2687 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2688 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2692 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2693 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2702 * Allocate the frag array.
 *
 * Attaches a frag array to a freshly built node for the
 * PRECISE_EXC_CONTEXT bookkeeping.  Only done while the graph is in
 * phase_building, and only if the node was neither optimized into a
 * different op nor CSEd (in which case the array already exists).
2704 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2705 if (get_opt_precise_exc_context()) {
2706 if ((current_ir_graph->phase_state == phase_building) &&
2707 (get_irn_op(res) == op) && /* Could be optimized away. */
2708 !*frag_store) /* Could be a cse where the arr is already set. */ {
2709 *frag_store = new_frag_arr(res);
2712 } /* allocate_frag_arr */
/* Division-like constructors: fragile ops that get a frag array when
   precise exception context is enabled. */
2715 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2717 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2718 #if PRECISE_EXC_CONTEXT
2719 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2726 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2728 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2729 #if PRECISE_EXC_CONTEXT
2730 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2734 } /* new_d_DivMod */
2737 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2740 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2741 #if PRECISE_EXC_CONTEXT
2742 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2749 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2751 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2752 #if PRECISE_EXC_CONTEXT
2753 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2772 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2773 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2777 new_d_Jmp(dbg_info *db) {
2778 return new_bd_Jmp(db, current_ir_graph->current_block);
2782 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2783 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2787 new_d_Cond(dbg_info *db, ir_node *c) {
2788 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Call is a fragile op: attach the frag array for precise exception context. */
2792 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2796 res = new_bd_Call(db, current_ir_graph->current_block,
2797 store, callee, arity, in, tp);
2798 #if PRECISE_EXC_CONTEXT
2799 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2806 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2807 return new_bd_Return(db, current_ir_graph->current_block,
2809 } /* new_d_Return */
/* Memory operations: Load, Store and Alloc also receive a frag array
   under PRECISE_EXC_CONTEXT. */
2812 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2814 res = new_bd_Load(db, current_ir_graph->current_block,
2816 #if PRECISE_EXC_CONTEXT
2817 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2824 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2826 res = new_bd_Store(db, current_ir_graph->current_block,
2828 #if PRECISE_EXC_CONTEXT
2829 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2836 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2840 res = new_bd_Alloc(db, current_ir_graph->current_block,
2841 store, size, alloc_type, where);
2842 #if PRECISE_EXC_CONTEXT
2843 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2850 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2851 ir_node *size, ir_type *free_type, where_alloc where)
2853 return new_bd_Free(db, current_ir_graph->current_block,
2854 store, ptr, size, free_type, where);
/* Sel with no explicit index operands (plain member selection). */
2858 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2859 /* GL: objptr was called frame before. Frame was a bad choice for the name
2860 as the operand could as well be a pointer to a dynamic object. */
2862 return new_bd_Sel(db, current_ir_graph->current_block,
2863 store, objptr, 0, NULL, ent);
2864 } /* new_d_simpleSel */
2867 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2869 return new_bd_Sel(db, current_ir_graph->current_block,
2870 store, objptr, n_index, index, sel);
/* SymConsts, like Consts, are placed in the start block. */
2874 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2876 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2878 } /* new_d_SymConst_type */
2881 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind)
2883 return new_bd_SymConst(db, get_irg_start_block(current_ir_graph),
2885 } /* new_d_SymConst */
2888 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2889 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2895 return _new_d_Bad();
2899 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2900 return new_bd_Confirm(db, current_ir_graph->current_block,
2902 } /* new_d_Confirm */
2905 new_d_Unknown(ir_mode *m) {
2906 return new_bd_Unknown(m);
2907 } /* new_d_Unknown */
2910 new_d_CallBegin(dbg_info *db, ir_node *call) {
2911 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2912 } /* new_d_CallBegin */
2915 new_d_EndReg(dbg_info *db) {
2916 return new_bd_EndReg(db, current_ir_graph->current_block);
2917 } /* new_d_EndReg */
2920 new_d_EndExcept(dbg_info *db) {
2921 return new_bd_EndExcept(db, current_ir_graph->current_block);
2922 } /* new_d_EndExcept */
2925 new_d_Break(dbg_info *db) {
2926 return new_bd_Break(db, current_ir_graph->current_block);
2930 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2931 return new_bd_Filter (db, current_ir_graph->current_block,
2933 } /* new_d_Filter */
/* Parentheses around the name suppress expansion of a same-named macro. */
2936 (new_d_NoMem)(void) {
2937 return _new_d_NoMem();
2941 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2942 ir_node *ir_true, ir_mode *mode) {
2943 return new_bd_Mux(db, current_ir_graph->current_block,
2944 sel, ir_false, ir_true, mode);
2948 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2949 return new_bd_Psi(db, current_ir_graph->current_block,
2950 arity, conds, vals, mode);
/* CopyB is a fragile op: frag array for precise exception context. */
2953 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2954 ir_node *dst, ir_node *src, ir_type *data_type) {
2956 res = new_bd_CopyB(db, current_ir_graph->current_block,
2957 store, dst, src, data_type);
2958 #if PRECISE_EXC_CONTEXT
2959 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2965 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2966 return new_bd_InstOf(db, current_ir_graph->current_block,
2967 store, objptr, type);
2968 } /* new_d_InstOf */
2971 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2972 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Bound is a fragile op: frag array for precise exception context. */
2975 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2976 ir_node *idx, ir_node *lower, ir_node *upper) {
2978 res = new_bd_Bound(db, current_ir_graph->current_block,
2979 store, idx, lower, upper);
2980 #if PRECISE_EXC_CONTEXT
2981 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2987 new_d_Pin(dbg_info *db, ir_node *node) {
2988 return new_bd_Pin(db, current_ir_graph->current_block, node);
2991 /* ********************************************************************* */
2992 /* Comfortable interface with automatic Phi node construction. */
2993 /* (Uses also constructors of ?? interface, except new_Block. */
2994 /* ********************************************************************* */
2996 /* Block construction */
2997 /* immature Block without predecessors */
/* Creates a new immature block: predecessors are added later with
   add_immBlock_pred() and the block is finished with mature_immBlock().
   The new block becomes the current block of current_ir_graph. */
2998 ir_node *new_d_immBlock(dbg_info *db) {
3001 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3002 /* creates a new dynamic in-array as length of in is -1 */
3003 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3004 current_ir_graph->current_block = res;
3005 res->attr.block.matured = 0;
3006 res->attr.block.dead = 0;
3007 /* res->attr.block.exc = exc_normal; */
3008 /* res->attr.block.handler_entry = 0; */
3009 res->attr.block.irg = current_ir_graph;
3010 res->attr.block.backedge = NULL;
3011 res->attr.block.in_cg = NULL;
3012 res->attr.block.cg_backedge = NULL;
3013 set_Block_block_visited(res, 0);
3015 /* Create and initialize array for Phi-node construction. */
3016 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3017 current_ir_graph->n_loc)
3018 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3020 /* Immature block may not be optimized! */
3021 IRN_VRFY_IRG(res, current_ir_graph);
3024 } /* new_d_immBlock */
3027 new_immBlock(void) {
3028 return new_d_immBlock(NULL);
3029 } /* new_immBlock */
3031 /* add an edge to a jmp/control flow node */
/* Appends a control-flow predecessor to an immature (not yet matured) block. */
3033 add_immBlock_pred(ir_node *block, ir_node *jmp)
3035 if (block->attr.block.matured) {
3036 assert(0 && "Error: Block already matured!\n");
3039 assert(jmp != NULL);
3040 ARR_APP1(ir_node *, block->in, jmp);
3042 } /* add_immBlock_pred */
3044 /* changing the current block */
3046 set_cur_block(ir_node *target) {
3047 current_ir_graph->current_block = target;
3048 } /* set_cur_block */
3050 /* ************************ */
3051 /* parameter administration */
3053 /* get a value from the parameter array from the current block by its index */
3055 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3056 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3057 inc_irg_visited(current_ir_graph);
/* pos + 1: slot 0 of graph_arr is reserved for the memory state (store). */
3059 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3062 /* get a value from the parameter array from the current block by its index */
3064 get_value(int pos, ir_mode *mode) {
3065 return get_d_value(NULL, pos, mode);
3068 /* set a value at position pos in the parameter array from the current block */
3070 set_value(int pos, ir_node *value) {
3071 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3072 assert(pos+1 < current_ir_graph->n_loc);
3073 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* Searches the current block's value array for the given node. */
3077 find_value(ir_node *value) {
3079 ir_node *bl = current_ir_graph->current_block;
/* Start at 1: slot 0 holds the memory state, not a variable. */
3081 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3082 if (bl->attr.block.graph_arr[i] == value)
3087 /* get the current store */
3091 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3092 /* GL: one could call get_value instead */
3093 inc_irg_visited(current_ir_graph);
3094 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3097 /* set the current store: handles automatic Sync construction for Load nodes */
3099 set_store(ir_node *store)
3101 ir_node *load, *pload, *pred, *in[2];
3103 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Optionally merge independent non-volatile Loads into Sync nodes so
   they do not serialize on each other's memory state. */
3105 if (get_opt_auto_create_sync()) {
3106 /* handle non-volatile Load nodes by automatically creating Sync's */
3107 load = skip_Proj(store);
3108 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3109 pred = get_Load_mem(load);
3111 if (is_Sync(pred)) {
3112 /* a Load after a Sync: move it up */
3113 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3115 set_Load_mem(load, get_memop_mem(mem));
3116 add_Sync_pred(pred, store);
3120 pload = skip_Proj(pred);
3121 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3122 /* a Load after a Load: create a new Sync */
3123 set_Load_mem(load, get_Load_mem(pload));
3127 store = new_Sync(2, in);
/* Slot 0 of the value array holds the current memory state. */
3132 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds ka to the keep-alive edges of the graph's End node
   (see add_End_keepalive), so the node is kept in the graph. */
3136 keep_alive(ir_node *ka) {
3137 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3140 /* --- Useful access routines --- */
3141 /* Returns the current block of the current graph. To set the current
3142 block use set_cur_block. */
3143 ir_node *get_cur_block(void) {
3144 return get_irg_current_block(current_ir_graph);
3145 } /* get_cur_block */
3147 /* Returns the frame type of the current graph */
3148 ir_type *get_cur_frame_type(void) {
3149 return get_irg_frame_type(current_ir_graph);
3150 } /* get_cur_frame_type */
3153 /* ********************************************************************* */
3156 /* call once for each run of the library */
/* Installs the frontend callback that supplies a value for local
   variables which are read before they are written
   (stored in default_initialize_local_variable). */
3158 init_cons(uninitialized_local_variable_func_t *func) {
3159 default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and moves the
   program representation to the phase_high state. */
3163 irp_finalize_cons(void) {
3165 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3166 irg_finalize_cons(get_irp_irg(i));
3168 irp->phase_state = phase_high;
3169 } /* irp_finalize_cons */
/* -- Convenience constructors without debug information: each simply -- */
/* -- forwards to its new_d_* counterpart with a NULL dbg_info.       -- */
3172 ir_node *new_Block(int arity, ir_node **in) {
3173 return new_d_Block(NULL, arity, in);
3175 ir_node *new_Start  (void) {
3176 return new_d_Start(NULL);
3178 ir_node *new_End    (void) {
3179 return new_d_End(NULL);
3181 ir_node *new_Jmp    (void) {
3182 return new_d_Jmp(NULL);
3184 ir_node *new_IJmp   (ir_node *tgt) {
3185 return new_d_IJmp(NULL, tgt);
3187 ir_node *new_Cond   (ir_node *c) {
3188 return new_d_Cond(NULL, c);
3190 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3191 return new_d_Return(NULL, store, arity, in);
3193 ir_node *new_Const  (ir_mode *mode, tarval *con) {
3194 return new_d_Const(NULL, mode, con);
3197 ir_node *new_Const_long(ir_mode *mode, long value)
3199 return new_d_Const_long(NULL, mode, value);
3202 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3203 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3206 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3207 return new_d_SymConst(NULL, value, kind);
3209 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3210 return new_d_simpleSel(NULL, store, objptr, ent);
3212 ir_node *new_Sel    (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3214 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3216 ir_node *new_Call   (ir_node *store, ir_node *callee, int arity, ir_node **in,
3218 return new_d_Call(NULL, store, callee, arity, in, tp);
3220 ir_node *new_Add    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3221 return new_d_Add(NULL, op1, op2, mode);
3223 ir_node *new_Sub    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3224 return new_d_Sub(NULL, op1, op2, mode);
3226 ir_node *new_Minus  (ir_node *op,  ir_mode *mode) {
3227 return new_d_Minus(NULL, op, mode);
3229 ir_node *new_Mul    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3230 return new_d_Mul(NULL, op1, op2, mode);
3232 ir_node *new_Quot   (ir_node *memop, ir_node *op1, ir_node *op2) {
3233 return new_d_Quot(NULL, memop, op1, op2);
3235 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3236 return new_d_DivMod(NULL, memop, op1, op2);
3238 ir_node *new_Div    (ir_node *memop, ir_node *op1, ir_node *op2) {
3239 return new_d_Div(NULL, memop, op1, op2);
3241 ir_node *new_Mod    (ir_node *memop, ir_node *op1, ir_node *op2) {
3242 return new_d_Mod(NULL, memop, op1, op2);
3244 ir_node *new_Abs    (ir_node *op, ir_mode *mode) {
3245 return new_d_Abs(NULL, op, mode);
3247 ir_node *new_And    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3248 return new_d_And(NULL, op1, op2, mode);
3250 ir_node *new_Or     (ir_node *op1, ir_node *op2, ir_mode *mode) {
3251 return new_d_Or(NULL, op1, op2, mode);
3253 ir_node *new_Eor    (ir_node *op1, ir_node *op2, ir_mode *mode) {
3254 return new_d_Eor(NULL, op1, op2, mode);
3256 ir_node *new_Not    (ir_node *op,                ir_mode *mode) {
3257 return new_d_Not(NULL, op, mode);
3259 ir_node *new_Shl    (ir_node *op,  ir_node *k,   ir_mode *mode) {
3260 return new_d_Shl(NULL, op, k, mode);
3262 ir_node *new_Shr    (ir_node *op,  ir_node *k,   ir_mode *mode) {
3263 return new_d_Shr(NULL, op, k, mode);
3265 ir_node *new_Shrs   (ir_node *op,  ir_node *k,   ir_mode *mode) {
3266 return new_d_Shrs(NULL, op, k, mode);
3268 ir_node *new_Rot    (ir_node *op,  ir_node *k,   ir_mode *mode) {
3269 return new_d_Rot(NULL, op, k, mode);
3271 ir_node *new_Carry  (ir_node *op1, ir_node *op2, ir_mode *mode) {
3272 return new_d_Carry(NULL, op1, op2, mode);
3274 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3275 return new_d_Borrow(NULL, op1, op2, mode);
3277 ir_node *new_Cmp    (ir_node *op1, ir_node *op2) {
3278 return new_d_Cmp(NULL, op1, op2);
3280 ir_node *new_Conv   (ir_node *op, ir_mode *mode) {
3281 return new_d_Conv(NULL, op, mode);
3283 ir_node *new_strictConv   (ir_node *op, ir_mode *mode) {
3284 return new_d_strictConv(NULL, op, mode);
3286 ir_node *new_Cast   (ir_node *op, ir_type *to_tp) {
3287 return new_d_Cast(NULL, op, to_tp);
3289 ir_node *new_Phi    (int arity, ir_node **in, ir_mode *mode) {
3290 return new_d_Phi(NULL, arity, in, mode);
3292 ir_node *new_Load   (ir_node *store, ir_node *addr, ir_mode *mode) {
3293 return new_d_Load(NULL, store, addr, mode);
3295 ir_node *new_Store  (ir_node *store, ir_node *addr, ir_node *val) {
3296 return new_d_Store(NULL, store, addr, val);
3298 ir_node *new_Alloc  (ir_node *store, ir_node *size, ir_type *alloc_type,
3299 where_alloc where) {
3300 return new_d_Alloc(NULL, store, size, alloc_type, where);
3302 ir_node *new_Free   (ir_node *store, ir_node *ptr, ir_node *size,
3303 ir_type *free_type, where_alloc where) {
3304 return new_d_Free(NULL, store, ptr, size, free_type, where);
3306 ir_node *new_Sync   (int arity, ir_node *in[]) {
3307 return new_d_Sync(NULL, arity, in);
3309 ir_node *new_Proj   (ir_node *arg, ir_mode *mode, long proj) {
3310 return new_d_Proj(NULL, arg, mode, proj);
3312 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3313 return new_d_defaultProj(NULL, arg, max_proj);
3315 ir_node *new_Tuple  (int arity, ir_node **in) {
3316 return new_d_Tuple(NULL, arity, in);
3318 ir_node *new_Id     (ir_node *val, ir_mode *mode) {
3319 return new_d_Id(NULL, val, mode);
3321 ir_node *new_Bad    (void) {
3324 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3325 return new_d_Confirm (NULL, val, bound, cmp);
3327 ir_node *new_Unknown(ir_mode *m) {
3328 return new_d_Unknown(m);
3330 ir_node *new_CallBegin (ir_node *callee) {
3331 return new_d_CallBegin(NULL, callee);
3333 ir_node *new_EndReg (void) {
3334 return new_d_EndReg(NULL);
3336 ir_node *new_EndExcept (void) {
3337 return new_d_EndExcept(NULL);
3339 ir_node *new_Break  (void) {
3340 return new_d_Break(NULL);
3342 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3343 return new_d_Filter(NULL, arg, mode, proj);
3345 ir_node *new_NoMem  (void) {
3346 return new_d_NoMem();
3348 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3349 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3351 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3352 return new_d_Psi(NULL, arity, conds, vals, mode);
3354 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3355 return new_d_CopyB(NULL, store, dst, src, data_type);
3357 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3358 return new_d_InstOf (NULL, store, objptr, ent);
3360 ir_node *new_Raise  (ir_node *store, ir_node *obj) {
3361 return new_d_Raise(NULL, store, obj);
3363 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3364 return new_d_Bound(NULL, store, idx, lower, upper);
3366 ir_node *new_Pin(ir_node *node) {
3367 return new_d_Pin(NULL, node);