3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
29 #include "irgraph_t.h"
33 #include "firm_common_t.h"
39 #include "irbackedge_t.h"
41 #include "iredges_t.h"
44 #if USE_EXPLICIT_PHI_IN_STACK
45 /* A stack needed for the automatic Phi node construction in constructor
46 Phi_in. Redefinition in irgraph.c!! */
51 typedef struct Phi_in_stack Phi_in_stack;
54 /* when we need verifying */
56 # define IRN_VRFY_IRG(res, irg)
58 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
62 * Language dependent variable initialization callback.
64 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* creates a bd constructor for a binop */
67 #define NEW_BD_BINOP(instr) \
69 new_bd_##instr(dbg_info *db, ir_node *block, \
70 ir_node *op1, ir_node *op2, ir_mode *mode) \
74 ir_graph *irg = current_ir_graph; \
77 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
78 res = optimize_node(res); \
79 IRN_VRFY_IRG(res, irg); \
83 /* creates a bd constructor for an unop */
84 #define NEW_BD_UNOP(instr) \
86 new_bd_##instr(dbg_info *db, ir_node *block, \
87 ir_node *op, ir_mode *mode) \
90 ir_graph *irg = current_ir_graph; \
91 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
92 res = optimize_node(res); \
93 IRN_VRFY_IRG(res, irg); \
97 /* creates a bd constructor for a divop */
98 #define NEW_BD_DIVOP(instr) \
100 new_bd_##instr(dbg_info *db, ir_node *block, \
101 ir_node *memop, ir_node *op1, ir_node *op2) \
105 ir_graph *irg = current_ir_graph; \
109 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
110 res = optimize_node(res); \
111 IRN_VRFY_IRG(res, irg); \
115 /* creates a rd constructor for a binop */
116 #define NEW_RD_BINOP(instr) \
118 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
119 ir_node *op1, ir_node *op2, ir_mode *mode) \
122 ir_graph *rem = current_ir_graph; \
123 current_ir_graph = irg; \
124 res = new_bd_##instr(db, block, op1, op2, mode); \
125 current_ir_graph = rem; \
129 /* creates a rd constructor for an unop */
130 #define NEW_RD_UNOP(instr) \
132 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
133 ir_node *op, ir_mode *mode) \
136 ir_graph *rem = current_ir_graph; \
137 current_ir_graph = irg; \
138 res = new_bd_##instr(db, block, op, mode); \
139 current_ir_graph = rem; \
143 /* creates a rd constructor for a divop */
144 #define NEW_RD_DIVOP(instr) \
146 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
147 ir_node *memop, ir_node *op1, ir_node *op2) \
150 ir_graph *rem = current_ir_graph; \
151 current_ir_graph = irg; \
152 res = new_bd_##instr(db, block, memop, op1, op2); \
153 current_ir_graph = rem; \
157 /* creates a d constructor for a binop */
158 #define NEW_D_BINOP(instr) \
160 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
161 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
164 /* creates a d constructor for an unop */
165 #define NEW_D_UNOP(instr) \
167 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
168 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
173 * Constructs a Block with a fixed number of predecessors.
174 * Does not set current_block. Cannot be used with automatic
175 * Phi node construction.
178 new_bd_Block(dbg_info *db, int arity, ir_node **in)
181 ir_graph *irg = current_ir_graph;
183 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
184 set_Block_matured(res, 1);
185 set_Block_block_visited(res, 0);
187 /* res->attr.block.exc = exc_normal; */
188 /* res->attr.block.handler_entry = 0; */
189 res->attr.block.dead = 0;
190 res->attr.block.irg = irg;
191 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
192 res->attr.block.in_cg = NULL;
193 res->attr.block.cg_backedge = NULL;
194 res->attr.block.extblk = NULL;
196 IRN_VRFY_IRG(res, irg);
201 new_bd_Start(dbg_info *db, ir_node *block)
204 ir_graph *irg = current_ir_graph;
206 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
207 /* res->attr.start.irg = irg; */
209 IRN_VRFY_IRG(res, irg);
214 new_bd_End(dbg_info *db, ir_node *block)
217 ir_graph *irg = current_ir_graph;
219 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
221 IRN_VRFY_IRG(res, irg);
226 * Creates a Phi node with all predecessors. Calling this constructor
227 * is only allowed if the corresponding block is mature.
230 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
233 ir_graph *irg = current_ir_graph;
237 /* Don't assert that block matured: the use of this constructor is strongly restricted. */
239 if ( get_Block_matured(block) )
240 assert( get_irn_arity(block) == arity );
242 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
244 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
246 for (i = arity-1; i >= 0; i--)
247 if (get_irn_op(in[i]) == op_Unknown) {
252 if (!has_unknown) res = optimize_node (res);
253 IRN_VRFY_IRG(res, irg);
255 /* Memory Phis in endless loops must be kept alive.
256 As we can't distinguish these easily we keep all of them alive. */
257 if ((res->op == op_Phi) && (mode == mode_M))
258 add_End_keepalive(get_irg_end(irg), res);
263 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
266 ir_graph *irg = current_ir_graph;
268 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
269 res->attr.con.tv = con;
270 set_Const_type(res, tp); /* Call method because of complex assertion. */
271 res = optimize_node (res);
272 assert(get_Const_type(res) == tp);
273 IRN_VRFY_IRG(res, irg);
276 } /* new_bd_Const_type */
279 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
281 ir_graph *irg = current_ir_graph;
283 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
287 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value)
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
292 } /* new_bd_Const_long */
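/* Illustrative sketch (not part of the original code): a front end that wants
   the integer constant 42 does not have to build a tarval by hand.  Assuming
   an already set up graph irg, the wrapper defined above boils down to

       ir_node *c = new_r_Const_long(irg, get_irg_start_block(irg), mode_Is, 42);

   which is the same as passing new_tarval_from_long(42, mode_Is) to
   new_r_Const().  Const nodes are placed in the start block anyway, see
   new_bd_Const_type() above. */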
295 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
298 ir_graph *irg = current_ir_graph;
300 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
301 res = optimize_node(res);
302 IRN_VRFY_IRG(res, irg);
307 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332 ir_graph *irg = current_ir_graph;
334 assert(arg->op == op_Cond);
335 arg->attr.cond.kind = fragmentary;
336 arg->attr.cond.default_proj = max_proj;
337 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
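/* Illustrative sketch (not part of the original code): a front end lowering a
   small switch over a value sel with the cases 0 and 1 typically combines
   Cond, Proj and the default Proj like this (irg, block and sel are
   placeholders; 2 is used as the default proj number because it must be
   larger than every case number):

       ir_node *cond  = new_r_Cond(irg, block, sel);
       ir_node *case0 = new_r_Proj(irg, block, cond, mode_X, 0);
       ir_node *case1 = new_r_Proj(irg, block, cond, mode_X, 1);
       ir_node *deflt = new_r_defaultProj(irg, block, cond, 2);

   Each mode_X Proj then becomes a control flow predecessor of the block that
   implements the corresponding case. */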
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
355 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in)
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
402 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Jmp(dbg_info *db, ir_node *block)
419 ir_graph *irg = current_ir_graph;
421 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
422 res = optimize_node (res);
423 IRN_VRFY_IRG (res, irg);
428 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt)
431 ir_graph *irg = current_ir_graph;
433 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
434 res = optimize_node (res);
435 IRN_VRFY_IRG (res, irg);
437 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
443 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c)
446 ir_graph *irg = current_ir_graph;
448 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
449 res->attr.cond.kind = dense;
450 res->attr.cond.default_proj = 0;
451 res->attr.cond.pred = COND_JMP_PRED_NONE;
452 res = optimize_node (res);
453 IRN_VRFY_IRG(res, irg);
458 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
459 ir_node *callee, int arity, ir_node **in, ir_type *tp)
464 ir_graph *irg = current_ir_graph;
467 NEW_ARR_A(ir_node *, r_in, r_arity);
470 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
472 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
474 assert((get_unknown_type() == tp) || is_Method_type(tp));
475 set_Call_type(res, tp);
476 res->attr.call.exc.pin_state = op_pin_state_pinned;
477 res->attr.call.callee_arr = NULL;
478 res = optimize_node(res);
479 IRN_VRFY_IRG(res, irg);
484 new_bd_Return (dbg_info *db, ir_node *block,
485 ir_node *store, int arity, ir_node **in)
490 ir_graph *irg = current_ir_graph;
493 NEW_ARR_A (ir_node *, r_in, r_arity);
495 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
496 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
497 res = optimize_node(res);
498 IRN_VRFY_IRG(res, irg);
503 new_bd_Load(dbg_info *db, ir_node *block,
504 ir_node *store, ir_node *adr, ir_mode *mode)
508 ir_graph *irg = current_ir_graph;
512 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
513 res->attr.load.exc.pin_state = op_pin_state_pinned;
514 res->attr.load.load_mode = mode;
515 res->attr.load.volatility = volatility_non_volatile;
516 res = optimize_node(res);
517 IRN_VRFY_IRG(res, irg);
522 new_bd_Store(dbg_info *db, ir_node *block,
523 ir_node *store, ir_node *adr, ir_node *val)
527 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
533 res->attr.store.exc.pin_state = op_pin_state_pinned;
534 res->attr.store.volatility = volatility_non_volatile;
535 res = optimize_node(res);
536 IRN_VRFY_IRG(res, irg);
541 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
542 ir_node *size, ir_type *alloc_type, where_alloc where)
546 ir_graph *irg = current_ir_graph;
550 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
551 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
552 res->attr.alloc.where = where;
553 res->attr.alloc.type = alloc_type;
554 res = optimize_node(res);
555 IRN_VRFY_IRG(res, irg);
560 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
561 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
565 ir_graph *irg = current_ir_graph;
570 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
571 res->attr.free.where = where;
572 res->attr.free.type = free_type;
573 res = optimize_node(res);
574 IRN_VRFY_IRG(res, irg);
579 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
580 int arity, ir_node **in, entity *ent)
585 ir_graph *irg = current_ir_graph;
587 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
590 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
593 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
595 * FIXME: Sels can select functions which should be of mode mode_P_code.
597 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
598 res->attr.sel.ent = ent;
599 res = optimize_node(res);
600 IRN_VRFY_IRG(res, irg);
605 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
606 symconst_kind symkind, ir_type *tp) {
609 ir_graph *irg = current_ir_graph;
611 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
612 mode = mode_P_data; /* FIXME: can be mode_P_code */
616 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
618 res->attr.symc.num = symkind;
619 res->attr.symc.sym = value;
620 res->attr.symc.tp = tp;
622 res = optimize_node(res);
623 IRN_VRFY_IRG(res, irg);
625 } /* new_bd_SymConst_type */
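/* Illustrative sketch (not part of the original code): the symconst_symbol
   argument is a union, so this one constructor covers entity addresses, name
   addresses, type tags, type sizes and alignments.  The convenience wrappers
   defined further down pick the matching union member and kind; e.g. for a
   placeholder entity ent and a placeholder type tp:

       ir_node *addr = new_rd_SymConst_addr_ent(NULL, irg, ent, firm_unknown_type);
       ir_node *size = new_rd_SymConst_size(NULL, irg, tp, firm_unknown_type);

   Both end up in new_rd_SymConst_type() with symconst_addr_ent respectively
   symconst_type_size. */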
628 new_bd_Sync(dbg_info *db, ir_node *block)
631 ir_graph *irg = current_ir_graph;
633 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
634 /* no need to call optimize node here, Sync are always created with no predecessors */
635 IRN_VRFY_IRG(res, irg);
640 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
642 ir_node *in[2], *res;
643 ir_graph *irg = current_ir_graph;
647 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
648 res->attr.confirm_cmp = cmp;
649 res = optimize_node (res);
650 IRN_VRFY_IRG(res, irg);
654 /* this function is often called with current_ir_graph unset */
656 new_bd_Unknown(ir_mode *m)
659 ir_graph *irg = current_ir_graph;
661 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
662 res = optimize_node(res);
664 } /* new_bd_Unknown */
667 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call)
671 ir_graph *irg = current_ir_graph;
673 in[0] = get_Call_ptr(call);
674 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
675 /* res->attr.callbegin.irg = irg; */
676 res->attr.callbegin.call = call;
677 res = optimize_node(res);
678 IRN_VRFY_IRG(res, irg);
680 } /* new_bd_CallBegin */
683 new_bd_EndReg(dbg_info *db, ir_node *block)
686 ir_graph *irg = current_ir_graph;
688 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
689 set_irg_end_reg(irg, res);
690 IRN_VRFY_IRG(res, irg);
692 } /* new_bd_EndReg */
695 new_bd_EndExcept(dbg_info *db, ir_node *block)
698 ir_graph *irg = current_ir_graph;
700 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
701 set_irg_end_except(irg, res);
702 IRN_VRFY_IRG (res, irg);
704 } /* new_bd_EndExcept */
707 new_bd_Break(dbg_info *db, ir_node *block)
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
713 res = optimize_node(res);
714 IRN_VRFY_IRG(res, irg);
719 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
723 ir_graph *irg = current_ir_graph;
725 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
726 res->attr.filter.proj = proj;
727 res->attr.filter.in_cg = NULL;
728 res->attr.filter.backedge = NULL;
731 assert(get_Proj_pred(res));
732 assert(get_nodes_block(get_Proj_pred(res)));
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
737 } /* new_bd_Filter */
740 new_bd_Mux(dbg_info *db, ir_node *block,
741 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
745 ir_graph *irg = current_ir_graph;
751 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
760 new_bd_Psi(dbg_info *db, ir_node *block,
761 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
765 ir_graph *irg = current_ir_graph;
768 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
770 for (i = 0; i < arity; ++i) {
772 in[2 * i + 1] = vals[i];
776 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
779 res = optimize_node(res);
780 IRN_VRFY_IRG(res, irg);
785 new_bd_CopyB(dbg_info *db, ir_node *block,
786 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
790 ir_graph *irg = current_ir_graph;
796 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
798 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
799 res->attr.copyb.data_type = data_type;
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
806 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
807 ir_node *objptr, ir_type *type)
811 ir_graph *irg = current_ir_graph;
815 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
816 res->attr.instof.type = type;
817 res = optimize_node(res);
818 IRN_VRFY_IRG(res, irg);
820 } /* new_bd_InstOf */
823 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
827 ir_graph *irg = current_ir_graph;
831 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
832 res = optimize_node(res);
833 IRN_VRFY_IRG(res, irg);
838 new_bd_Bound(dbg_info *db, ir_node *block,
839 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
843 ir_graph *irg = current_ir_graph;
849 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
850 res->attr.bound.exc.pin_state = op_pin_state_pinned;
851 res = optimize_node(res);
852 IRN_VRFY_IRG(res, irg);
857 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node)
860 ir_graph *irg = current_ir_graph;
862 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
863 res = optimize_node(res);
864 IRN_VRFY_IRG(res, irg);
868 /* --------------------------------------------- */
869 /* private interfaces, for professional use only */
870 /* --------------------------------------------- */
872 /* Constructs a Block with a fixed number of predecessors.
873 Does not set current_block. Cannot be used with automatic
874 Phi node construction. */
876 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in)
878 ir_graph *rem = current_ir_graph;
881 current_ir_graph = irg;
882 res = new_bd_Block(db, arity, in);
883 current_ir_graph = rem;
889 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
891 ir_graph *rem = current_ir_graph;
894 current_ir_graph = irg;
895 res = new_bd_Start(db, block);
896 current_ir_graph = rem;
902 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
905 ir_graph *rem = current_ir_graph;
907 current_ir_graph = irg;
908 res = new_bd_End(db, block);
909 current_ir_graph = rem;
914 /* Creates a Phi node with all predecessors. Calling this constructor
915 is only allowed if the corresponding block is mature. */
917 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
920 ir_graph *rem = current_ir_graph;
922 current_ir_graph = irg;
923 res = new_bd_Phi(db, block, arity, in, mode);
924 current_ir_graph = rem;
930 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
933 ir_graph *rem = current_ir_graph;
935 current_ir_graph = irg;
936 res = new_bd_Const_type(db, block, mode, con, tp);
937 current_ir_graph = rem;
940 } /* new_rd_Const_type */
943 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
946 ir_graph *rem = current_ir_graph;
948 current_ir_graph = irg;
949 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
950 current_ir_graph = rem;
956 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
958 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
959 } /* new_rd_Const_long */
962 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
965 ir_graph *rem = current_ir_graph;
967 current_ir_graph = irg;
968 res = new_bd_Id(db, block, val, mode);
969 current_ir_graph = rem;
975 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Proj(db, block, arg, mode, proj);
983 current_ir_graph = rem;
989 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
993 ir_graph *rem = current_ir_graph;
995 current_ir_graph = irg;
996 res = new_bd_defaultProj(db, block, arg, max_proj);
997 current_ir_graph = rem;
1000 } /* new_rd_defaultProj */
1003 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1006 ir_graph *rem = current_ir_graph;
1008 current_ir_graph = irg;
1009 res = new_bd_Conv(db, block, op, mode, 0);
1010 current_ir_graph = rem;
1016 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1019 ir_graph *rem = current_ir_graph;
1021 current_ir_graph = irg;
1022 res = new_bd_Cast(db, block, op, to_tp);
1023 current_ir_graph = rem;
1029 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1032 ir_graph *rem = current_ir_graph;
1034 current_ir_graph = irg;
1035 res = new_bd_Tuple(db, block, arity, in);
1036 current_ir_graph = rem;
1039 } /* new_rd_Tuple */
1046 NEW_RD_DIVOP(DivMod)
1059 NEW_RD_BINOP(Borrow)
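/* For reference: an invocation like NEW_RD_BINOP(Borrow) above expands,
   roughly, into a wrapper that only switches current_ir_graph and delegates
   to the corresponding bd constructor:

       ir_node *new_rd_Borrow(dbg_info *db, ir_graph *irg, ir_node *block,
                              ir_node *op1, ir_node *op2, ir_mode *mode)
       {
         ir_node  *res;
         ir_graph *rem = current_ir_graph;
         current_ir_graph = irg;
         res = new_bd_Borrow(db, block, op1, op2, mode);
         current_ir_graph = rem;
         return res;
       }
*/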
1062 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1063 ir_node *op1, ir_node *op2)
1066 ir_graph *rem = current_ir_graph;
1068 current_ir_graph = irg;
1069 res = new_bd_Cmp(db, block, op1, op2);
1070 current_ir_graph = rem;
1076 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block)
1079 ir_graph *rem = current_ir_graph;
1081 current_ir_graph = irg;
1082 res = new_bd_Jmp(db, block);
1083 current_ir_graph = rem;
1089 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_IJmp(db, block, tgt);
1096 current_ir_graph = rem;
1102 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Cond(db, block, c);
1109 current_ir_graph = rem;
1115 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1116 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1123 current_ir_graph = rem;
1129 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1130 ir_node *store, int arity, ir_node **in)
1133 ir_graph *rem = current_ir_graph;
1135 current_ir_graph = irg;
1136 res = new_bd_Return(db, block, store, arity, in);
1137 current_ir_graph = rem;
1140 } /* new_rd_Return */
1143 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1144 ir_node *store, ir_node *adr, ir_mode *mode)
1147 ir_graph *rem = current_ir_graph;
1149 current_ir_graph = irg;
1150 res = new_bd_Load(db, block, store, adr, mode);
1151 current_ir_graph = rem;
1157 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1158 ir_node *store, ir_node *adr, ir_node *val)
1161 ir_graph *rem = current_ir_graph;
1163 current_ir_graph = irg;
1164 res = new_bd_Store(db, block, store, adr, val);
1165 current_ir_graph = rem;
1168 } /* new_rd_Store */
1171 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1172 ir_node *size, ir_type *alloc_type, where_alloc where)
1175 ir_graph *rem = current_ir_graph;
1177 current_ir_graph = irg;
1178 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1179 current_ir_graph = rem;
1182 } /* new_rd_Alloc */
1185 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1186 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1189 ir_graph *rem = current_ir_graph;
1191 current_ir_graph = irg;
1192 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1193 current_ir_graph = rem;
1199 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1200 ir_node *store, ir_node *objptr, entity *ent)
1203 ir_graph *rem = current_ir_graph;
1205 current_ir_graph = irg;
1206 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1207 current_ir_graph = rem;
1210 } /* new_rd_simpleSel */
1213 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1214 int arity, ir_node **in, entity *ent)
1217 ir_graph *rem = current_ir_graph;
1219 current_ir_graph = irg;
1220 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1221 current_ir_graph = rem;
1227 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1228 symconst_kind symkind, ir_type *tp)
1231 ir_graph *rem = current_ir_graph;
1233 current_ir_graph = irg;
1234 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1235 current_ir_graph = rem;
1238 } /* new_rd_SymConst_type */
1241 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1242 symconst_kind symkind)
1244 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1246 } /* new_rd_SymConst */
1248 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1250 symconst_symbol sym = {(ir_type *)symbol};
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1252 } /* new_rd_SymConst_addr_ent */
1254 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1255 symconst_symbol sym = {(ir_type *)symbol};
1256 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1257 } /* new_rd_SymConst_addr_name */
1259 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1260 symconst_symbol sym = {symbol};
1261 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1262 } /* new_rd_SymConst_type_tag */
1264 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1265 symconst_symbol sym = {symbol};
1266 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1267 } /* new_rd_SymConst_size */
1269 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1270 symconst_symbol sym = {symbol};
1271 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1272 } /* new_rd_SymConst_align */
1275 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1278 ir_graph *rem = current_ir_graph;
1281 current_ir_graph = irg;
1282 res = new_bd_Sync(db, block);
1283 current_ir_graph = rem;
1285 for (i = 0; i < arity; ++i)
1286 add_Sync_pred(res, in[i]);
1292 new_rd_Bad(ir_graph *irg) {
1293 return get_irg_bad(irg);
1297 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1300 ir_graph *rem = current_ir_graph;
1302 current_ir_graph = irg;
1303 res = new_bd_Confirm(db, block, val, bound, cmp);
1304 current_ir_graph = rem;
1307 } /* new_rd_Confirm */
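/* Illustrative sketch (not part of the original code): a Confirm computes
   nothing, it only records a relation that holds on the current control flow
   path.  After a test x < 10, the true branch could note this as

       ir_node *ten  = new_r_Const_long(irg, block, mode_Is, 10);
       ir_node *conf = new_r_Confirm(irg, block, x, ten, pn_Cmp_Lt);

   and later optimizations may use conf in place of x inside that branch.
   x, irg and block are placeholders. */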
1309 /* this function is often called with current_ir_graph unset */
1311 new_rd_Unknown(ir_graph *irg, ir_mode *m)
1314 ir_graph *rem = current_ir_graph;
1316 current_ir_graph = irg;
1317 res = new_bd_Unknown(m);
1318 current_ir_graph = rem;
1321 } /* new_rd_Unknown */
1324 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1327 ir_graph *rem = current_ir_graph;
1329 current_ir_graph = irg;
1330 res = new_bd_CallBegin(db, block, call);
1331 current_ir_graph = rem;
1334 } /* new_rd_CallBegin */
1337 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
1341 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1342 set_irg_end_reg(irg, res);
1343 IRN_VRFY_IRG(res, irg);
1345 } /* new_rd_EndReg */
1348 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
1352 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1353 set_irg_end_except(irg, res);
1354 IRN_VRFY_IRG (res, irg);
1356 } /* new_rd_EndExcept */
1359 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block)
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Break(db, block);
1366 current_ir_graph = rem;
1369 } /* new_rd_Break */
1372 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1376 ir_graph *rem = current_ir_graph;
1378 current_ir_graph = irg;
1379 res = new_bd_Filter(db, block, arg, mode, proj);
1380 current_ir_graph = rem;
1383 } /* new_rd_Filter */
1386 new_rd_NoMem(ir_graph *irg) {
1387 return get_irg_no_mem(irg);
1388 } /* new_rd_NoMem */
1391 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1392 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1395 ir_graph *rem = current_ir_graph;
1397 current_ir_graph = irg;
1398 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1399 current_ir_graph = rem;
1405 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1406 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1409 ir_graph *rem = current_ir_graph;
1411 current_ir_graph = irg;
1412 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1413 current_ir_graph = rem;
1418 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1419 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1426 current_ir_graph = rem;
1429 } /* new_rd_CopyB */
1432 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1433 ir_node *objptr, ir_type *type)
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_InstOf(db, block, store, objptr, type);
1440 current_ir_graph = rem;
1443 } /* new_rd_InstOf */
1446 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1449 ir_graph *rem = current_ir_graph;
1451 current_ir_graph = irg;
1452 res = new_bd_Raise(db, block, store, obj);
1453 current_ir_graph = rem;
1456 } /* new_rd_Raise */
1458 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1459 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1462 ir_graph *rem = current_ir_graph;
1464 current_ir_graph = irg;
1465 res = new_bd_Bound(db, block, store, idx, lower, upper);
1466 current_ir_graph = rem;
1469 } /* new_rd_Bound */
1471 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node)
1474 ir_graph *rem = current_ir_graph;
1476 current_ir_graph = irg;
1477 res = new_bd_Pin(db, block, node);
1478 current_ir_graph = rem;
1483 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1484 return new_rd_Block(NULL, irg, arity, in);
1486 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1487 return new_rd_Start(NULL, irg, block);
1489 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1490 return new_rd_End(NULL, irg, block);
1492 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1493 return new_rd_Jmp(NULL, irg, block);
1495 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1496 return new_rd_IJmp(NULL, irg, block, tgt);
1498 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1499 return new_rd_Cond(NULL, irg, block, c);
1501 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1502 ir_node *store, int arity, ir_node **in) {
1503 return new_rd_Return(NULL, irg, block, store, arity, in);
1505 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1506 ir_mode *mode, tarval *con) {
1507 return new_rd_Const(NULL, irg, block, mode, con);
1509 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1510 ir_mode *mode, long value) {
1511 return new_rd_Const_long(NULL, irg, block, mode, value);
1513 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1514 ir_mode *mode, tarval *con, ir_type *tp) {
1515 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1517 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1518 symconst_symbol value, symconst_kind symkind) {
1519 return new_rd_SymConst(NULL, irg, block, value, symkind);
1521 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1522 ir_node *objptr, entity *ent) {
1523 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1525 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1526 ir_node *objptr, int n_index, ir_node **index,
1528 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1530 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1531 ir_node *callee, int arity, ir_node **in,
1533 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1535 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1536 ir_node *op1, ir_node *op2, ir_mode *mode) {
1537 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1539 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1540 ir_node *op1, ir_node *op2, ir_mode *mode) {
1541 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1543 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1544 ir_node *op, ir_mode *mode) {
1545 return new_rd_Minus(NULL, irg, block, op, mode);
1547 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1548 ir_node *op1, ir_node *op2, ir_mode *mode) {
1549 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1551 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1552 ir_node *memop, ir_node *op1, ir_node *op2) {
1553 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1555 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1556 ir_node *memop, ir_node *op1, ir_node *op2) {
1557 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1559 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1560 ir_node *memop, ir_node *op1, ir_node *op2) {
1561 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1563 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1564 ir_node *memop, ir_node *op1, ir_node *op2) {
1565 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1567 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1568 ir_node *op, ir_mode *mode) {
1569 return new_rd_Abs(NULL, irg, block, op, mode);
1571 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1572 ir_node *op1, ir_node *op2, ir_mode *mode) {
1573 return new_rd_And(NULL, irg, block, op1, op2, mode);
1575 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1576 ir_node *op1, ir_node *op2, ir_mode *mode) {
1577 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1579 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1580 ir_node *op1, ir_node *op2, ir_mode *mode) {
1581 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1583 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1584 ir_node *op, ir_mode *mode) {
1585 return new_rd_Not(NULL, irg, block, op, mode);
1587 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_node *k, ir_mode *mode) {
1589 return new_rd_Shl(NULL, irg, block, op, k, mode);
1591 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1592 ir_node *op, ir_node *k, ir_mode *mode) {
1593 return new_rd_Shr(NULL, irg, block, op, k, mode);
1595 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_node *k, ir_mode *mode) {
1597 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1599 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1600 ir_node *op, ir_node *k, ir_mode *mode) {
1601 return new_rd_Rot(NULL, irg, block, op, k, mode);
1603 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1604 ir_node *op, ir_node *k, ir_mode *mode) {
1605 return new_rd_Carry(NULL, irg, block, op, k, mode);
1607 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1608 ir_node *op, ir_node *k, ir_mode *mode) {
1609 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1611 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1612 ir_node *op1, ir_node *op2) {
1613 return new_rd_Cmp(NULL, irg, block, op1, op2);
1615 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1616 ir_node *op, ir_mode *mode) {
1617 return new_rd_Conv(NULL, irg, block, op, mode);
1619 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1620 return new_rd_Cast(NULL, irg, block, op, to_tp);
1622 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1623 ir_node **in, ir_mode *mode) {
1624 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1626 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1627 ir_node *store, ir_node *adr, ir_mode *mode) {
1628 return new_rd_Load(NULL, irg, block, store, adr, mode);
1630 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1631 ir_node *store, ir_node *adr, ir_node *val) {
1632 return new_rd_Store(NULL, irg, block, store, adr, val);
1634 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1635 ir_node *size, ir_type *alloc_type, where_alloc where) {
1636 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1638 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1639 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1640 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1642 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1643 return new_rd_Sync(NULL, irg, block, arity, in);
1645 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1646 ir_mode *mode, long proj) {
1647 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1649 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1651 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1653 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1654 int arity, ir_node **in) {
1655 return new_rd_Tuple(NULL, irg, block, arity, in );
1657 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1658 ir_node *val, ir_mode *mode) {
1659 return new_rd_Id(NULL, irg, block, val, mode);
1661 ir_node *new_r_Bad (ir_graph *irg) {
1662 return new_rd_Bad(irg);
1664 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1665 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1667 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1668 return new_rd_Unknown(irg, m);
1670 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1671 return new_rd_CallBegin(NULL, irg, block, callee);
1673 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1674 return new_rd_EndReg(NULL, irg, block);
1676 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1677 return new_rd_EndExcept(NULL, irg, block);
1679 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1680 return new_rd_Break(NULL, irg, block);
1682 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1683 ir_mode *mode, long proj) {
1684 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1686 ir_node *new_r_NoMem (ir_graph *irg) {
1687 return new_rd_NoMem(irg);
1689 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1690 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1691 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1693 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1694 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1695 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1697 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1698 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1699 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1701 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1703 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1705 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1706 ir_node *store, ir_node *obj) {
1707 return new_rd_Raise(NULL, irg, block, store, obj);
1709 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1710 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1711 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1713 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1714 return new_rd_Pin(NULL, irg, block, node);
1717 /** ********************/
1718 /** public interfaces */
1719 /** construction tools */
1723 * - create a new Start node in the current block
1725 * @return s - pointer to the created Start node
1730 new_d_Start(dbg_info *db)
1734 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1735 op_Start, mode_T, 0, NULL);
1736 /* res->attr.start.irg = current_ir_graph; */
1738 res = optimize_node(res);
1739 IRN_VRFY_IRG(res, current_ir_graph);
1744 new_d_End(dbg_info *db)
1747 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1748 op_End, mode_X, -1, NULL);
1749 res = optimize_node(res);
1750 IRN_VRFY_IRG(res, current_ir_graph);
1755 /* Constructs a Block with a fixed number of predecessors.
1756 Does set current_block. Can be used with automatic Phi
1757 node construction. */
1759 new_d_Block(dbg_info *db, int arity, ir_node **in)
1763 int has_unknown = 0;
1765 res = new_bd_Block(db, arity, in);
1767 /* Create and initialize array for Phi-node construction. */
1768 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1769 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1770 current_ir_graph->n_loc);
1771 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1774 for (i = arity-1; i >= 0; i--)
1775 if (get_irn_op(in[i]) == op_Unknown) {
1780 if (!has_unknown) res = optimize_node(res);
1781 current_ir_graph->current_block = res;
1783 IRN_VRFY_IRG(res, current_ir_graph);
1788 /* ***********************************************************************/
1789 /* Methods necessary for automatic Phi node creation */
1791 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1792 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1793 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1794 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1796 Call Graph: ( A ---> B == A "calls" B)
1798 get_value ---> get_r_value_internal
1806 mature_immBlock ---> phi_merge; phi_merge ---> get_r_value_internal, new_rd_Phi_in
1810 get_r_value_internal ---> phi_merge, new_rd_Phi0
1812 * *************************************************************************** */
1814 /** Creates a Phi node with 0 predecessors. */
1815 static INLINE ir_node *
1816 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
1820 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1821 IRN_VRFY_IRG(res, irg);
1825 /* There are two implementations of the Phi node construction. The first
1826 is faster, but does not work for blocks with more than 2 predecessors.
1827 The second always works but is slower and causes more unnecessary Phi
1829 nodes. Select the implementation by the following preprocessor flag, set in
1831 #if USE_FAST_PHI_CONSTRUCTION
1833 /* This is a stack used for allocating and deallocating nodes in
1834 new_rd_Phi_in. The original implementation used the obstack
1835 to model this stack, now it is explicit. This reduces side effects.
1837 #if USE_EXPLICIT_PHI_IN_STACK
1839 new_Phi_in_stack(void) {
1842 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1844 res->stack = NEW_ARR_F (ir_node *, 0);
1848 } /* new_Phi_in_stack */
1851 free_Phi_in_stack(Phi_in_stack *s) {
1852 DEL_ARR_F(s->stack);
1854 } /* free_Phi_in_stack */
1857 free_to_Phi_in_stack(ir_node *phi) {
1858 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1859 current_ir_graph->Phi_in_stack->pos)
1860 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1862 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1864 (current_ir_graph->Phi_in_stack->pos)++;
1865 } /* free_to_Phi_in_stack */
1867 static INLINE ir_node *
1868 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1869 int arity, ir_node **in) {
1871 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1872 int pos = current_ir_graph->Phi_in_stack->pos;
1876 /* We need to allocate a new node */
1877 res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in); /* no dbg info available here */
1878 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1880 /* reuse the old node and initialize it again. */
1883 assert (res->kind == k_ir_node);
1884 assert (res->op == op_Phi);
1888 assert (arity >= 0);
1889 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1890 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1892 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1894 (current_ir_graph->Phi_in_stack->pos)--;
1897 } /* alloc_or_pop_from_Phi_in_stack */
1898 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1901 * Creates a Phi node with a given, fixed array **in of predecessors.
1902 * If the Phi node is unnecessary, as the same value reaches the block
1903 * through all control flow paths, it is eliminated and the value
1904 * returned directly. This constructor is only intended for use in
1905 * the automatic Phi node generation triggered by get_value or mature.
1906 * The implementation is quite tricky and depends on the fact that
1907 * the nodes are allocated on a stack:
1908 * The in array contains predecessors and NULLs. The NULLs appear,
1909 * if get_r_value_internal, that computed the predecessors, reached
1910 * the same block on two paths. In this case the same value reaches
1911 * this block on both paths, there is no definition in between. We need
1912 * not allocate a Phi where these paths merge, but we have to communicate
1913 * this fact to the caller. This happens by returning a pointer to the
1914 * node the caller _will_ allocate. (Yes, we predict the address. We can
1915 * do so because the nodes are allocated on the obstack.) The caller then
1916 * finds a pointer to itself and, when this routine is called again,
1917 * eliminates itself.
1919 static INLINE ir_node *
1920 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1923 ir_node *res, *known;
1925 /* Allocate a new node on the obstack. This can return a node to
1926 which some of the pointers in the in-array already point.
1927 Attention: the constructor copies the in array, i.e., the later
1928 changes to the array in this routine do not affect the
1929 constructed node! If the in array contains NULLs, there will be
1930 missing predecessors in the returned node. Is this a possible
1931 internal state of the Phi node generation? */
1932 #if USE_EXPLICIT_PHI_IN_STACK
1933 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1935 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1936 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1939 /* The in-array can contain NULLs. These were returned by
1940 get_r_value_internal if it reached the same block/definition on a
1941 second path. The NULLs are replaced by the node itself to
1942 simplify the test in the next loop. */
1943 for (i = 0; i < ins; ++i) {
1948 /* This loop checks whether the Phi has more than one predecessor.
1949 If so, it is a real Phi node and we break the loop. Else the Phi
1950 node merges the same definition on several paths and therefore is
1952 for (i = 0; i < ins; ++i) {
1953 if (in[i] == res || in[i] == known)
1962 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1964 #if USE_EXPLICIT_PHI_IN_STACK
1965 free_to_Phi_in_stack(res);
1967 edges_node_deleted(res, current_ir_graph);
1968 obstack_free(current_ir_graph->obst, res);
1972 res = optimize_node (res);
1973 IRN_VRFY_IRG(res, irg);
1976 /* return the pointer to the Phi node. This node might be deallocated! */
1978 } /* new_rd_Phi_in */
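/* Worked example (sketch, not from the original sources): assume a value is
   defined once before an if/else and read after the join.  phi_merge hands
   this constructor an in array whose entries all refer to that one defining
   node (or are NULL for paths without an intermediate definition).  The loop
   above then never finds a second distinct predecessor, the speculatively
   allocated node is given back (Phi_in stack or obstack_free) and the known
   definition itself is returned -- no Phi node is built for this join. */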
1981 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1984 * Allocates and returns this node. The routine called to allocate the
1985 * node might optimize it away and return a real value, or even a pointer
1986 * to a deallocated Phi node on top of the obstack!
1987 * This function is called with an in-array of proper size.
1990 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1992 ir_node *prevBlock, *res;
1995 /* This loop goes to all predecessor blocks of the block the Phi node is in
1996 and there finds the operands of the Phi node by calling
1997 get_r_value_internal. */
1998 for (i = 1; i <= ins; ++i) {
1999 assert (block->in[i]);
2000 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2002 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2005 /* After collecting all predecessors into the array nin a new Phi node
2006 with these predecessors is created. This constructor contains an
2007 optimization: If all predecessors of the Phi node are identical it
2008 returns the only operand instead of a new Phi node. If the value
2009 passes two different control flow edges without being defined, and
2010 this is the second path treated, a pointer to the node that will be
2011 allocated for the first path (recursion) is returned. We already
2012 know the address of this node, as it is the next node to be allocated
2013 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2014 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2016 /* Now we know the value for "pos" and can enter it in the array with
2017 all known local variables. Attention: this might be a pointer to
2018 a node, that later will be allocated!!! See new_rd_Phi_in().
2019 If this is called in mature, after some set_value() in the same block,
2020 the proper value must not be overwritten:
2022 get_value (makes Phi0, puts it into graph_arr)
2023 set_value (overwrites Phi0 in graph_arr)
2024 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2027 if (!block->attr.block.graph_arr[pos]) {
2028 block->attr.block.graph_arr[pos] = res;
2030 /* printf(" value already computed by %s\n",
2031 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2038 * This function returns the last definition of a variable. In case
2039 * this variable was last defined in a previous block, Phi nodes are
2040 * inserted. If the part of the firm graph containing the definition
2041 * is not yet constructed, a dummy Phi node is returned.
2044 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2047 /* There are 4 cases to treat.
2049 1. The block is not mature and we visit it the first time. We cannot
2050 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2051 predecessors is returned. This node is added to the linked list (field
2052 "link") of the containing block to be completed when this block is
2053 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2056 2. The value is already known in this block, graph_arr[pos] is set and we
2057 visit the block the first time. We can return the value without
2058 creating any new nodes.
2060 3. The block is mature and we visit it the first time. A Phi node needs
2061 to be created (phi_merge). If the Phi is not needed, as all its
2062 operands are the same value reaching the block through different
2063 paths, it's optimized away and the value itself is returned.
2065 4. The block is mature, and we visit it the second time. Now two
2066 subcases are possible:
2067 * The value was computed completely the last time we were here. This
2068 is the case if there is no loop. We can return the proper value.
2069 * The recursion that visited this node and set the flag did not
2070 return yet. We are computing a value in a loop and need to
2071 break the recursion without knowing the result yet.
2072 @@@ strange case. Straightforwardly we would create a Phi before
2073 starting the computation of its predecessors. In this case we will
2074 find a Phi here in any case. The problem is that this implementation
2075 only creates a Phi after computing the predecessors, so that it is
2076 hard to compute self references of this Phi. @@@
2077 There is no simple check for the second subcase. Therefore we check
2078 for a second visit and treat all such cases as the second subcase.
2079 Anyways, the basic situation is the same: we reached a block
2080 on two paths without finding a definition of the value: No Phi
2081 nodes are needed on both paths.
2082 We return this information "Two paths, no Phi needed" by a very tricky
2083 implementation that relies on the fact that an obstack is a stack and
2084 will return a node with the same address on different allocations.
2085 Look also at phi_merge and new_rd_phi_in to understand this.
2086 @@@ Unfortunately this does not work, see testprogram
2087 three_cfpred_example.
2091 /* case 4 -- already visited. */
2092 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2094 /* visited the first time */
2095 set_irn_visited(block, get_irg_visited(current_ir_graph));
2097 /* Get the local valid value */
2098 res = block->attr.block.graph_arr[pos];
2100 /* case 2 -- If the value is actually computed, return it. */
2101 if (res) return res;
2103 if (block->attr.block.matured) { /* case 3 */
2105 /* The Phi has the same amount of ins as the corresponding block. */
2106 int ins = get_irn_arity(block);
2108 NEW_ARR_A (ir_node *, nin, ins);
2110 /* Phi merge collects the predecessors and then creates a node. */
2111 res = phi_merge (block, pos, mode, nin, ins);
2113 } else { /* case 1 */
2114 /* The block is not mature, we don't know how many in's are needed. A Phi
2115 with zero predecessors is created. Such a Phi node is called a Phi0
2116 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2117 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2119 The Phi0 has to remember the pos of its internal value. If the real
2120 Phi is computed, pos is used to update the array with the local
2123 res = new_rd_Phi0 (current_ir_graph, block, mode);
2124 res->attr.phi0_pos = pos;
2125 res->link = block->link;
2129 /* If we get here, the frontend missed a use-before-definition error */
2132 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2133 assert (mode->code >= irm_F && mode->code <= irm_P);
2134 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2135 tarval_mode_null[mode->code]);
2138 /* The local valid value is available now. */
2139 block->attr.block.graph_arr[pos] = res;
2142 } /* get_r_value_internal */
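/* Illustrative sketch (not part of the original code): the machinery above is
   what makes the usual front end sequence work.  Variable slot 0 is written
   in two predecessor blocks and read after the join; the Phi node appears
   automatically when the join block is matured.  new_immBlock,
   add_immBlock_pred, set_value, get_value and mature_immBlock are the public
   interface declared in ircons.h; value_then and value_else are placeholders.

       set_value(0, value_then);            (in the then block)
       ir_node *jmp_then = new_Jmp();
       set_value(0, value_else);            (in the else block)
       ir_node *jmp_else = new_Jmp();

       ir_node *join = new_immBlock();      (becomes the current block)
       add_immBlock_pred(join, jmp_then);
       add_immBlock_pred(join, jmp_else);
       mature_immBlock(join);
       ir_node *v = get_value(0, mode_Is);  (a Phi of value_then and value_else)
*/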
2147 it starts the recursion. This causes an Id at the entry of
2148 every block that has no definition of the value! **/
2150 #if USE_EXPLICIT_PHI_IN_STACK
2152 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2153 void free_Phi_in_stack(Phi_in_stack *s) { }
2156 static INLINE ir_node *
2157 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2158 ir_node **in, int ins, ir_node *phi0)
2161 ir_node *res, *known;
2163 /* Allocate a new node on the obstack. The allocation copies the in
2165 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2166 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2168 /* This loop checks whether the Phi has more than one predecessor.
2169 If so, it is a real Phi node and we break the loop. Else the
2170 Phi node merges the same definition on several paths and therefore
2171 is not needed. Don't consider Bad nodes! */
2173 for (i=0; i < ins; ++i)
2177 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2179 /* Optimize self referencing Phis: We can't detect them yet properly, as
2180 they still refer to the Phi0 they will replace. So replace right now. */
2181 if (phi0 && in[i] == phi0) in[i] = res;
2183 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2191 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2194 edges_node_deleted(res, current_ir_graph);
2195 obstack_free (current_ir_graph->obst, res);
2196 if (is_Phi(known)) {
2197 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2198 order, an enclosing Phi node may become superfluous. */
2199 res = optimize_in_place_2(known);
2201 exchange(known, res);
2207 /* An undefined value, e.g., in unreachable code. */
2211 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2212 IRN_VRFY_IRG(res, irg);
2213 /* Memory Phis in endless loops must be kept alive.
2214 As we can't distinguish these easily we keep all of them alive. */
2215 if ((res->op == op_Phi) && (mode == mode_M))
2216 add_End_keepalive(get_irg_end(irg), res);
2220 } /* new_rd_Phi_in */
2223 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2225 #if PRECISE_EXC_CONTEXT
2227 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2230 * Construct a new frag_array for node n.
2231 * Copy the content from the current graph_arr of the corresponding block:
2232 * this is the current state.
2233 * Set ProjM(n) as current memory state.
2234 * Further the last entry in frag_arr of current block points to n. This
2235 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2237 static INLINE ir_node ** new_frag_arr(ir_node *n)
2242 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2243 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2244 sizeof(ir_node *)*current_ir_graph->n_loc);
2246 /* turn off optimization before allocating Proj nodes, as res isn't
2248 opt = get_opt_optimize(); set_optimize(0);
2249 /* Here we rely on the fact that all frag ops have Memory as first result! */
2250 if (get_irn_op(n) == op_Call)
2251 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2252 else if (get_irn_op(n) == op_CopyB)
2253 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2255 assert((pn_Quot_M == pn_DivMod_M) &&
2256 (pn_Quot_M == pn_Div_M) &&
2257 (pn_Quot_M == pn_Mod_M) &&
2258 (pn_Quot_M == pn_Load_M) &&
2259 (pn_Quot_M == pn_Store_M) &&
2260 (pn_Quot_M == pn_Alloc_M) &&
2261 (pn_Quot_M == pn_Bound_M));
2262 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2266 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2268 } /* new_frag_arr */
2271 * Returns the frag_arr from a node.
2273 static INLINE ir_node **get_frag_arr(ir_node *n) {
2274 switch (get_irn_opcode(n)) {
2276 return n->attr.call.exc.frag_arr;
2278 return n->attr.alloc.exc.frag_arr;
2280 return n->attr.load.exc.frag_arr;
2282 return n->attr.store.exc.frag_arr;
2284 return n->attr.except.frag_arr;
2286 } /* get_frag_arr */
2289 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2291 if (!frag_arr[pos]) frag_arr[pos] = val;
2292 if (frag_arr[current_ir_graph->n_loc - 1]) {
2293 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2294 assert(arr != frag_arr && "Endless recursion detected");
2295 set_frag_value(arr, pos, val);
2300 for (i = 0; i < 1000; ++i) {
2301 if (!frag_arr[pos]) {
2302 frag_arr[pos] = val;
2304 if (frag_arr[current_ir_graph->n_loc - 1]) {
2305 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2311 assert(0 && "potential endless recursion");
2313 } /* set_frag_value */
2316 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2320 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2322 frag_arr = get_frag_arr(cfOp);
2323 res = frag_arr[pos];
2325 if (block->attr.block.graph_arr[pos]) {
2326 /* There was a set_value() after the cfOp and no get_value before that
2327 set_value(). We must build a Phi node now. */
2328 if (block->attr.block.matured) {
2329 int ins = get_irn_arity(block);
2331 NEW_ARR_A (ir_node *, nin, ins);
2332 res = phi_merge(block, pos, mode, nin, ins);
2334 res = new_rd_Phi0 (current_ir_graph, block, mode);
2335 res->attr.phi0_pos = pos;
2336 res->link = block->link;
2340 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2341 but this should be better: (remove comment if this works) */
2342 /* It's a Phi, we can write this into all graph_arrs with NULL */
2343 set_frag_value(block->attr.block.graph_arr, pos, res);
2345 res = get_r_value_internal(block, pos, mode);
2346 set_frag_value(block->attr.block.graph_arr, pos, res);
2350 } /* get_r_frag_value_internal */
2351 #endif /* PRECISE_EXC_CONTEXT */
2354 * Computes the predecessors for the real phi node, and then
2355 * allocates and returns this node. The routine called to allocate the
2356 * node might optimize it away and return a real value.
2357 * This function must be called with an in-array of proper size.
2360 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2362 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2365 /* If this block has no value at pos, create a Phi0 and remember it
2366 in graph_arr to break recursions.
2367 Otherwise we must not set graph_arr, as a later value is already remembered there. */
2369 if (!block->attr.block.graph_arr[pos]) {
2370 if (block == get_irg_start_block(current_ir_graph)) {
2371 /* Collapsing to Bad tarvals is not a good idea.
2372 So we call a user-supplied routine here that deals with this case as
2373 appropriate for the given language. Unfortunately, the only help we can give
2374 here is the position.
2376 Even if all variables are defined before use, it can happen that
2377 we get to the start block, if a Cond has been replaced by a tuple
2378 (bad, jmp). In this case we call the function needlessly, possibly
2379 reporting a non-existent error.
2380 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2383 if (default_initialize_local_variable) {
2384 ir_node *rem = get_cur_block();
2386 set_cur_block(block);
2387 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2391 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2392 /* We don't need to care about exception ops in the start block.
2393 There are none by definition. */
2394 return block->attr.block.graph_arr[pos];
2396 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2397 block->attr.block.graph_arr[pos] = phi0;
2398 #if PRECISE_EXC_CONTEXT
2399 if (get_opt_precise_exc_context()) {
2400 /* Set graph_arr for fragile ops. Also here we should break recursion.
2401 We could choose a cyclic path through a cfop, but the recursion would
2402 break at some point. */
2403 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2409 /* This loop visits all predecessor blocks of the block the Phi node
2410 is in, and finds the operands of the Phi node there by calling
2411 get_r_value_internal. */
2412 for (i = 1; i <= ins; ++i) {
2413 prevCfOp = skip_Proj(block->in[i]);
2415 if (is_Bad(prevCfOp)) {
2416 /* In case a Cond has been optimized we would get right to the start block
2417 with an invalid definition. */
2418 nin[i-1] = new_Bad();
2421 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2423 if (!is_Bad(prevBlock)) {
2424 #if PRECISE_EXC_CONTEXT
2425 if (get_opt_precise_exc_context() &&
2426 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2427 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2428 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2431 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2433 nin[i-1] = new_Bad();
2437 /* We want to pass the Phi0 node to the constructor: this finds additional
2438 optimization possibilities.
2439 The Phi0 node is either allocated in this function, or it comes from
2440 an earlier call to get_r_value_internal. In the latter case we must not yet
2441 exchange phi0, as that is done in mature_immBlock. */
2443 phi0_all = block->attr.block.graph_arr[pos];
2444 if (!((get_irn_op(phi0_all) == op_Phi) &&
2445 (get_irn_arity(phi0_all) == 0) &&
2446 (get_nodes_block(phi0_all) == block)))
2452 /* After collecting all predecessors into the array nin a new Phi node
2453 with these predecessors is created. This constructor contains an
2454 optimization: If all predecessors of the Phi node are identical it
2455 returns the only operand instead of a new Phi node. */
2456 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2458 /* In case we allocated a Phi0 node at the beginning of this procedure,
2459 we need to exchange this Phi0 with the real Phi. */
2461 exchange(phi0, res);
2462 block->attr.block.graph_arr[pos] = res;
2463 /* Don't call set_frag_value, as it does not overwrite. This doesn't matter,
2464 it is only an optimization. */
2471 * This function returns the last definition of a variable. In case
2472 * this variable was last defined in a previous block, Phi nodes are
2473 * inserted. If the part of the firm graph containing the definition
2474 * is not yet constructed, a dummy Phi node is returned.
2477 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2480 /* There are 4 cases to treat.
2482 1. The block is not mature and we visit it for the first time. We cannot
2483 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2484 predecessors is returned. This node is added to the linked list (field
2485 "link") of the containing block to be completed when this block is
2486 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2489 2. The value is already known in this block, graph_arr[pos] is set, and we
2490 visit the block for the first time. We can return the value without
2491 creating any new nodes.
2493 3. The block is mature and we visit it for the first time. A Phi node needs
2494 to be created (phi_merge). If the Phi is not needed because all its
2495 operands are the same value reaching the block through different
2496 paths, it's optimized away and the value itself is returned.
2498 4. The block is mature, and we visit it for the second time. Now two
2499 subcases are possible:
2500 * The value was computed completely the last time we were here. This
2501 is the case if there is no loop. We can return the proper value.
2502 * The recursion that visited this node and set the flag did not
2503 return yet. We are computing a value in a loop and need to
2504 break the recursion. This case only happens if we visited
2505 the same block with phi_merge before, which inserted a Phi0.
2506 So we return the Phi0.
2509 /* case 4 -- already visited. */
2510 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2511 /* As phi_merge allocates a Phi0, this value is always defined. Here
2512 is the critical difference between the two algorithms. */
2513 assert(block->attr.block.graph_arr[pos]);
2514 return block->attr.block.graph_arr[pos];
2517 /* visited the first time */
2518 set_irn_visited(block, get_irg_visited(current_ir_graph));
2520 /* Get the local valid value */
2521 res = block->attr.block.graph_arr[pos];
2523 /* case 2 -- If the value is actually computed, return it. */
2524 if (res) return res;
2526 if (block->attr.block.matured) { /* case 3 */
2528 /* The Phi has the same number of ins as the corresponding block. */
2529 int ins = get_irn_arity(block);
2531 NEW_ARR_A (ir_node *, nin, ins);
2533 /* Phi merge collects the predecessors and then creates a node. */
2534 res = phi_merge (block, pos, mode, nin, ins);
2536 } else { /* case 1 */
2537 /* The block is not mature, so we don't know how many ins are needed. A Phi
2538 with zero predecessors is created. Such a Phi node is called a Phi0
2539 node. The Phi0 is then added to the list of Phi0 nodes in this block
2540 to be matured by mature_immBlock later.
2541 The Phi0 has to remember the pos of its internal value. If the real
2542 Phi is computed, pos is used to update the array with the local
2544 res = new_rd_Phi0 (current_ir_graph, block, mode);
2545 res->attr.phi0_pos = pos;
2546 res->link = block->link;
2550 /* If we get here, the frontend missed a use-before-definition error */
2553 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2554 assert (mode->code >= irm_F && mode->code <= irm_P);
2555 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2556 get_mode_null(mode));
2559 /* The local valid value is available now. */
2560 block->attr.block.graph_arr[pos] = res;
2563 } /* get_r_value_internal */
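/* Illustrative sketch (not part of the library): how the four cases above
 * surface through the comfortable interface defined further below.  The
 * variable slot 0 and an integer mode such as mode_Is are assumptions of
 * the example, not requirements.
 *
 *   set_value(0, new_Const_long(mode_Is, 23));
 *   x = get_value(0, mode_Is);     case 2: the current block already holds a
 *                                  definition, it is returned unchanged.
 *
 *   set_cur_block(join_block);     a mature block with two predecessors
 *   x = get_value(0, mode_Is);     case 3: phi_merge collects the value from
 *                                  both predecessors and builds a Phi; if both
 *                                  deliver the same node, no Phi is created and
 *                                  that node is returned instead.
 */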
2565 #endif /* USE_FAST_PHI_CONSTRUCTION */
2567 /* ************************************************************************** */
2570 * Finalize a Block node, when all control flows are known.
2571 * Acceptable parameters are only Block nodes.
2574 mature_immBlock(ir_node *block)
2580 assert (get_irn_opcode(block) == iro_Block);
2581 /* @@@ should be re-enabled:
2582 assert (!get_Block_matured(block) && "Block already matured"); */
2584 if (!get_Block_matured(block)) {
2585 ins = ARR_LEN (block->in)-1;
2586 /* Fix block parameters */
2587 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2589 /* An array for building the Phi nodes. */
2590 NEW_ARR_A (ir_node *, nin, ins);
2592 /* Traverse a chain of Phi nodes attached to this block and mature
2594 for (n = block->link; n; n=next) {
2595 inc_irg_visited(current_ir_graph);
2597 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2600 block->attr.block.matured = 1;
2602 /* Now, as the block is a finished firm node, we can optimize it.
2603 Since other nodes have been allocated since the block was created
2604 we cannot free the node on the obstack. Therefore we have to call
2606 Unfortunately the optimization does not change a lot, as all allocated
2607 nodes refer to the unoptimized node.
2608 We can call _2, as global cse has no effect on blocks. */
2609 block = optimize_in_place_2(block);
2610 IRN_VRFY_IRG(block, current_ir_graph);
2612 } /* mature_immBlock */
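/* Usage sketch (illustrative only): the typical immature-block protocol that
 * ends in mature_immBlock.  Control flow into a loop header is not completely
 * known while the body is still being built, so the header stays immature
 * until the back edge exists; reading a value there in the meantime yields a
 * Phi0 that maturing completes.
 *
 *   ir_node *header = new_immBlock();        block with a dynamic in-array
 *   add_immBlock_pred(header, entry_jmp);    first known predecessor
 *   ...                                      build the loop body; get_value()
 *                                            calls may create Phi0 nodes here
 *   add_immBlock_pred(header, back_jmp);     back edge discovered later
 *   mature_immBlock(header);                 fixes the in-array and exchanges
 *                                            every Phi0 on header->link for a
 *                                            real Phi via phi_merge
 */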
2615 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2616 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2620 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2621 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2625 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2626 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2627 } /* new_d_Const_long */
2630 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2631 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2632 } /* new_d_Const_type */
2636 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2637 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2641 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2642 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2646 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2648 assert(arg->op == op_Cond);
2649 arg->attr.cond.kind = fragmentary;
2650 arg->attr.cond.default_proj = max_proj;
2651 res = new_Proj (arg, mode_X, max_proj);
2653 } /* new_d_defaultProj */
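/* Sketch (illustrative, assuming the case values double as projection numbers
 * of the Cond): a switch-like Cond with explicit case Projs and a defaultProj
 * for all remaining values.  new_d_defaultProj marks the Cond as fragmentary
 * and records max_proj as the number of the default target.
 *
 *   ir_node *cond  = new_Cond(selector);
 *   ir_node *case0 = new_Proj(cond, mode_X, 0);
 *   ir_node *case1 = new_Proj(cond, mode_X, 1);
 *   ir_node *dflt  = new_defaultProj(cond, 2);
 *   add_immBlock_pred(block_case0, case0);   and so on for the other targets
 */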
2656 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode) {
2657 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2661 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2662 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2663 } /* new_d_strictConv */
2666 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2667 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2671 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2672 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2681 * Allocate the frag array.
2683 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2684 if (get_opt_precise_exc_context()) {
2685 if ((current_ir_graph->phase_state == phase_building) &&
2686 (get_irn_op(res) == op) && /* Could be optimized away. */
2687 !*frag_store) /* Could be a cse where the arr is already set. */ {
2688 *frag_store = new_frag_arr(res);
2691 } /* allocate_frag_arr */
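/* Illustrative sketch (not part of the library): with precise exception context
 * enabled, a fragile op such as Quot carries a frag array so the memory state
 * valid on its exceptional exit can be recovered later.  pn_Quot_res is assumed
 * to name the result projection and mode_Is an integer mode; pn_Quot_M is the
 * regular memory projection checked in the assert in new_frag_arr above.
 *
 *   ir_node *q   = new_Quot(get_store(), a, b);         fragile: may raise
 *   set_store(new_Proj(q, mode_M, pn_Quot_M));          regular memory successor
 *   ir_node *res = new_Proj(q, mode_Is, pn_Quot_res);
 *
 * When a handler block later asks for a value via get_value()/get_store(),
 * get_r_frag_value_internal() consults the frag array of q instead of the
 * graph_arr of the block containing q.
 */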
2694 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2696 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2697 res->attr.except.pin_state = op_pin_state_pinned;
2698 #if PRECISE_EXC_CONTEXT
2699 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2706 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2708 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2709 res->attr.except.pin_state = op_pin_state_pinned;
2710 #if PRECISE_EXC_CONTEXT
2711 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2715 } /* new_d_DivMod */
2718 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2721 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2722 res->attr.except.pin_state = op_pin_state_pinned;
2723 #if PRECISE_EXC_CONTEXT
2724 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2731 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2733 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2734 res->attr.except.pin_state = op_pin_state_pinned;
2735 #if PRECISE_EXC_CONTEXT
2736 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2755 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2756 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2760 new_d_Jmp(dbg_info *db) {
2761 return new_bd_Jmp(db, current_ir_graph->current_block);
2765 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2766 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2770 new_d_Cond(dbg_info *db, ir_node *c) {
2771 return new_bd_Cond(db, current_ir_graph->current_block, c);
2775 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2779 res = new_bd_Call(db, current_ir_graph->current_block,
2780 store, callee, arity, in, tp);
2781 #if PRECISE_EXC_CONTEXT
2782 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2789 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2790 return new_bd_Return(db, current_ir_graph->current_block,
2792 } /* new_d_Return */
2795 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2797 res = new_bd_Load(db, current_ir_graph->current_block,
2799 #if PRECISE_EXC_CONTEXT
2800 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2807 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2809 res = new_bd_Store(db, current_ir_graph->current_block,
2811 #if PRECISE_EXC_CONTEXT
2812 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2819 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2823 res = new_bd_Alloc(db, current_ir_graph->current_block,
2824 store, size, alloc_type, where);
2825 #if PRECISE_EXC_CONTEXT
2826 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2833 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2834 ir_node *size, ir_type *free_type, where_alloc where)
2836 return new_bd_Free(db, current_ir_graph->current_block,
2837 store, ptr, size, free_type, where);
2841 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2842 /* GL: objptr was called frame before. Frame was a bad choice for the name,
2843 as the operand could just as well be a pointer to a dynamic object. */
2845 return new_bd_Sel(db, current_ir_graph->current_block,
2846 store, objptr, 0, NULL, ent);
2847 } /* new_d_simpleSel */
2850 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2852 return new_bd_Sel(db, current_ir_graph->current_block,
2853 store, objptr, n_index, index, sel);
2857 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2859 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2861 } /* new_d_SymConst_type */
2864 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind)
2866 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2867 value, kind, firm_unknown_type);
2868 } /* new_d_SymConst */
2871 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2872 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2878 return _new_d_Bad();
2882 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2883 return new_bd_Confirm(db, current_ir_graph->current_block,
2885 } /* new_d_Confirm */
2888 new_d_Unknown(ir_mode *m) {
2889 return new_bd_Unknown(m);
2890 } /* new_d_Unknown */
2893 new_d_CallBegin(dbg_info *db, ir_node *call) {
2894 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2895 } /* new_d_CallBegin */
2898 new_d_EndReg(dbg_info *db) {
2899 return new_bd_EndReg(db, current_ir_graph->current_block);
2900 } /* new_d_EndReg */
2903 new_d_EndExcept(dbg_info *db) {
2904 return new_bd_EndExcept(db, current_ir_graph->current_block);
2905 } /* new_d_EndExcept */
2908 new_d_Break(dbg_info *db) {
2909 return new_bd_Break(db, current_ir_graph->current_block);
2913 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2914 return new_bd_Filter (db, current_ir_graph->current_block,
2916 } /* new_d_Filter */
2919 (new_d_NoMem)(void) {
2920 return _new_d_NoMem();
2924 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2925 ir_node *ir_true, ir_mode *mode) {
2926 return new_bd_Mux(db, current_ir_graph->current_block,
2927 sel, ir_false, ir_true, mode);
2931 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2932 return new_bd_Psi(db, current_ir_graph->current_block,
2933 arity, conds, vals, mode);
2936 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2937 ir_node *dst, ir_node *src, ir_type *data_type) {
2939 res = new_bd_CopyB(db, current_ir_graph->current_block,
2940 store, dst, src, data_type);
2941 #if PRECISE_EXC_CONTEXT
2942 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2948 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2949 return new_bd_InstOf(db, current_ir_graph->current_block,
2950 store, objptr, type);
2951 } /* new_d_InstOf */
2954 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2955 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2958 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2959 ir_node *idx, ir_node *lower, ir_node *upper) {
2961 res = new_bd_Bound(db, current_ir_graph->current_block,
2962 store, idx, lower, upper);
2963 #if PRECISE_EXC_CONTEXT
2964 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2970 new_d_Pin(dbg_info *db, ir_node *node) {
2971 return new_bd_Pin(db, current_ir_graph->current_block, node);
2974 /* ********************************************************************* */
2975 /* Comfortable interface with automatic Phi node construction. */
2976 /* (Also uses constructors of the ?? interface, except new_Block.) */
2977 /* ********************************************************************* */
2979 /* Block construction */
2980 /* immature Block without predecessors */
2981 ir_node *new_d_immBlock(dbg_info *db) {
2984 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2985 /* creates a new dynamic in-array, as the length of in is -1 */
2986 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2987 current_ir_graph->current_block = res;
2988 res->attr.block.matured = 0;
2989 res->attr.block.dead = 0;
2990 /* res->attr.block.exc = exc_normal; */
2991 /* res->attr.block.handler_entry = 0; */
2992 res->attr.block.irg = current_ir_graph;
2993 res->attr.block.backedge = NULL;
2994 res->attr.block.in_cg = NULL;
2995 res->attr.block.cg_backedge = NULL;
2996 set_Block_block_visited(res, 0);
2998 /* Create and initialize array for Phi-node construction. */
2999 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3000 current_ir_graph->n_loc);
3001 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3003 /* Immature block may not be optimized! */
3004 IRN_VRFY_IRG(res, current_ir_graph);
3007 } /* new_d_immBlock */
3010 new_immBlock(void) {
3011 return new_d_immBlock(NULL);
3012 } /* new_immBlock */
3014 /* add a control flow edge (Jmp or other cf node) as a predecessor of the block */
3016 add_immBlock_pred(ir_node *block, ir_node *jmp)
3018 if (block->attr.block.matured) {
3019 assert(0 && "Error: Block already matured!\n");
3022 assert(jmp != NULL);
3023 ARR_APP1(ir_node *, block->in, jmp);
3025 } /* add_immBlock_pred */
3027 /* changing the current block */
3029 set_cur_block(ir_node *target) {
3030 current_ir_graph->current_block = target;
3031 } /* set_cur_block */
3033 /* ************************ */
3034 /* parameter administration */
3036 /* get a value from the variable array of the current block by its index */
3038 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3039 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3040 inc_irg_visited(current_ir_graph);
3042 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3045 /* get a value from the variable array of the current block by its index */
3047 get_value(int pos, ir_mode *mode) {
3048 return get_d_value(NULL, pos, mode);
3051 /* set a value at position pos in the variable array of the current block */
3053 set_value(int pos, ir_node *value) {
3054 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3055 assert(pos+1 < current_ir_graph->n_loc);
3056 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
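/* Usage sketch (illustrative): local variables are addressed by position.
 * Slot 0 of graph_arr is reserved for the memory state (see get_store/set_store
 * below), which is why get_value/set_value shift pos by one internally and why
 * n_loc must be at least the number of frontend variables plus one.  mode_Is is
 * assumed to be an integer mode here.
 *
 *   set_value(1, new_Const_long(mode_Is, 0));            variable #1 := 0
 *   ir_node *v = get_value(1, mode_Is);                  read it back; in another
 *                                                        block this may insert Phi
 *                                                        nodes on demand
 *   set_value(1, new_Add(v, new_Const_long(mode_Is, 1), mode_Is));
 */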
3060 find_value(ir_node *value) {
3062 ir_node *bl = current_ir_graph->current_block;
3064 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3065 if (bl->attr.block.graph_arr[i] == value)
3070 /* get the current store */
3074 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3075 /* GL: one could call get_value instead */
3076 inc_irg_visited(current_ir_graph);
3077 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3080 /* set the current store: handles automatic Sync construction for Load nodes */
3082 set_store(ir_node *store)
3084 ir_node *load, *pload, *pred, *in[2];
3086 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3087 assert(get_irn_mode(store) == mode_M && "storing non-memory node");
3089 if (get_opt_auto_create_sync()) {
3090 /* handle non-volatile Load nodes by automatically creating Sync's */
3091 load = skip_Proj(store);
3092 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3093 pred = get_Load_mem(load);
3095 if (is_Sync(pred)) {
3096 /* a Load after a Sync: move it up */
3097 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3099 set_Load_mem(load, get_memop_mem(mem));
3100 add_Sync_pred(pred, store);
3104 pload = skip_Proj(pred);
3105 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3106 /* a Load after a Load: create a new Sync */
3107 set_Load_mem(load, get_Load_mem(pload));
3111 store = new_Sync(2, in);
3116 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3120 keep_alive(ir_node *ka) {
3121 add_End_keepalive(get_irg_end(current_ir_graph), ka);
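/* Usage sketch (illustrative) for the store handling above: the memory state is
 * threaded through slot 0 like any other value.  pn_Load_res is assumed to name
 * the data result of a Load; pn_Load_M is the memory projection used in the
 * assert in new_frag_arr above.
 *
 *   ir_node *ld  = new_Load(get_store(), addr, mode_Is);
 *   ir_node *val = new_Proj(ld, mode_Is, pn_Load_res);
 *   set_store(new_Proj(ld, mode_M, pn_Load_M));
 *
 * With get_opt_auto_create_sync() enabled, a second non-volatile Load stored
 * this way is hung off the same predecessor memory by set_store, and the two
 * memory Projs are joined by a Sync, so independent loads do not serialize
 * each other.
 */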
3124 /* --- Useful access routines --- */
3125 /* Returns the current block of the current graph. To set the current
3126 block use set_cur_block. */
3127 ir_node *get_cur_block(void) {
3128 return get_irg_current_block(current_ir_graph);
3129 } /* get_cur_block */
3131 /* Returns the frame type of the current graph */
3132 ir_type *get_cur_frame_type(void) {
3133 return get_irg_frame_type(current_ir_graph);
3134 } /* get_cur_frame_type */
3137 /* ********************************************************************* */
3140 /* call once for each run of the library */
3142 init_cons(uninitialized_local_variable_func_t *func) {
3143 default_initialize_local_variable = func;
3147 irp_finalize_cons(void) {
3149 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3150 irg_finalize_cons(get_irp_irg(i));
3152 irp->phase_state = phase_high;
3153 } /* irp_finalize_cons */
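/* Usage sketch (illustrative): init_cons() is called once per run of the
 * library, optionally with a callback that supplies a language-specific default
 * value for uses of undefined variables; after all graphs have been built,
 * either each graph is finalized individually or irp_finalize_cons() finalizes
 * the whole program at once.
 *
 *   init_cons(NULL);            no special handling of undefined variables
 *   ...                         construct all ir_graphs
 *   irp_finalize_cons();        or: irg_finalize_cons(irg) per graph
 */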
3156 ir_node *new_Block(int arity, ir_node **in) {
3157 return new_d_Block(NULL, arity, in);
3159 ir_node *new_Start (void) {
3160 return new_d_Start(NULL);
3162 ir_node *new_End (void) {
3163 return new_d_End(NULL);
3165 ir_node *new_Jmp (void) {
3166 return new_d_Jmp(NULL);
3168 ir_node *new_IJmp (ir_node *tgt) {
3169 return new_d_IJmp(NULL, tgt);
3171 ir_node *new_Cond (ir_node *c) {
3172 return new_d_Cond(NULL, c);
3174 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3175 return new_d_Return(NULL, store, arity, in);
3177 ir_node *new_Const (ir_mode *mode, tarval *con) {
3178 return new_d_Const(NULL, mode, con);
3181 ir_node *new_Const_long(ir_mode *mode, long value)
3183 return new_d_Const_long(NULL, mode, value);
3186 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3187 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3190 ir_node *new_SymConst_type (symconst_symbol value, symconst_kind kind, ir_type *type) {
3191 return new_d_SymConst_type(NULL, value, kind, type);
3193 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3194 return new_d_SymConst(NULL, value, kind);
3196 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3197 return new_d_simpleSel(NULL, store, objptr, ent);
3199 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3201 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3203 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3205 return new_d_Call(NULL, store, callee, arity, in, tp);
3207 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3208 return new_d_Add(NULL, op1, op2, mode);
3210 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3211 return new_d_Sub(NULL, op1, op2, mode);
3213 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3214 return new_d_Minus(NULL, op, mode);
3216 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3217 return new_d_Mul(NULL, op1, op2, mode);
3219 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3220 return new_d_Quot(NULL, memop, op1, op2);
3222 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3223 return new_d_DivMod(NULL, memop, op1, op2);
3225 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3226 return new_d_Div(NULL, memop, op1, op2);
3228 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3229 return new_d_Mod(NULL, memop, op1, op2);
3231 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3232 return new_d_Abs(NULL, op, mode);
3234 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3235 return new_d_And(NULL, op1, op2, mode);
3237 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3238 return new_d_Or(NULL, op1, op2, mode);
3240 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3241 return new_d_Eor(NULL, op1, op2, mode);
3243 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3244 return new_d_Not(NULL, op, mode);
3246 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3247 return new_d_Shl(NULL, op, k, mode);
3249 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3250 return new_d_Shr(NULL, op, k, mode);
3252 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3253 return new_d_Shrs(NULL, op, k, mode);
3255 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3256 return new_d_Rot(NULL, op, k, mode);
3258 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3259 return new_d_Carry(NULL, op1, op2, mode);
3261 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3262 return new_d_Borrow(NULL, op1, op2, mode);
3264 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3265 return new_d_Cmp(NULL, op1, op2);
3267 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3268 return new_d_Conv(NULL, op, mode);
3270 ir_node *new_strictConv (ir_node *op, ir_mode *mode) {
3271 return new_d_strictConv(NULL, op, mode);
3273 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3274 return new_d_Cast(NULL, op, to_tp);
3276 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3277 return new_d_Phi(NULL, arity, in, mode);
3279 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3280 return new_d_Load(NULL, store, addr, mode);
3282 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3283 return new_d_Store(NULL, store, addr, val);
3285 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3286 where_alloc where) {
3287 return new_d_Alloc(NULL, store, size, alloc_type, where);
3289 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3290 ir_type *free_type, where_alloc where) {
3291 return new_d_Free(NULL, store, ptr, size, free_type, where);
3293 ir_node *new_Sync (int arity, ir_node *in[]) {
3294 return new_d_Sync(NULL, arity, in);
3296 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3297 return new_d_Proj(NULL, arg, mode, proj);
3299 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3300 return new_d_defaultProj(NULL, arg, max_proj);
3302 ir_node *new_Tuple (int arity, ir_node **in) {
3303 return new_d_Tuple(NULL, arity, in);
3305 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3306 return new_d_Id(NULL, val, mode);
3308 ir_node *new_Bad (void) {
3311 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3312 return new_d_Confirm (NULL, val, bound, cmp);
3314 ir_node *new_Unknown(ir_mode *m) {
3315 return new_d_Unknown(m);
3317 ir_node *new_CallBegin (ir_node *callee) {
3318 return new_d_CallBegin(NULL, callee);
3320 ir_node *new_EndReg (void) {
3321 return new_d_EndReg(NULL);
3323 ir_node *new_EndExcept (void) {
3324 return new_d_EndExcept(NULL);
3326 ir_node *new_Break (void) {
3327 return new_d_Break(NULL);
3329 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3330 return new_d_Filter(NULL, arg, mode, proj);
3332 ir_node *new_NoMem (void) {
3333 return new_d_NoMem();
3335 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3336 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3338 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3339 return new_d_Psi(NULL, arity, conds, vals, mode);
3341 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3342 return new_d_CopyB(NULL, store, dst, src, data_type);
3344 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3345 return new_d_InstOf (NULL, store, objptr, ent);
3347 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3348 return new_d_Raise(NULL, store, obj);
3350 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3351 return new_d_Bound(NULL, store, idx, lower, upper);
3353 ir_node *new_Pin(ir_node *node) {
3354 return new_d_Pin(NULL, node);