3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler, Michael Beck
10 * Copyright: (c) 1998-2006 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
29 #include "irgraph_t.h"
33 #include "firm_common_t.h"
39 #include "irbackedge_t.h"
41 #include "iredges_t.h"
44 #if USE_EXPLICIT_PHI_IN_STACK
45 /* A stack needed for the automatic Phi node construction in constructor
46 Phi_in. Redefinition in irgraph.c!! */
51 typedef struct Phi_in_stack Phi_in_stack;
54 /* when we need verifying */
56 # define IRN_VRFY_IRG(res, irg)
58 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
62 * Language dependent variable initialization callback.
64 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* creates a bd constructor for a binop */
67 #define NEW_BD_BINOP(instr) \
69 new_bd_##instr(dbg_info *db, ir_node *block, \
70 ir_node *op1, ir_node *op2, ir_mode *mode) \
74 ir_graph *irg = current_ir_graph; \
77 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
78 res = optimize_node(res); \
79 IRN_VRFY_IRG(res, irg); \
83 /* creates a bd constructor for an unop */
84 #define NEW_BD_UNOP(instr) \
86 new_bd_##instr(dbg_info *db, ir_node *block, \
87 ir_node *op, ir_mode *mode) \
90 ir_graph *irg = current_ir_graph; \
91 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
92 res = optimize_node(res); \
93 IRN_VRFY_IRG(res, irg); \
97 /* creates a bd constructor for an divop */
98 #define NEW_BD_DIVOP(instr) \
100 new_bd_##instr(dbg_info *db, ir_node *block, \
101 ir_node *memop, ir_node *op1, ir_node *op2) \
105 ir_graph *irg = current_ir_graph; \
109 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
110 res = optimize_node(res); \
111 IRN_VRFY_IRG(res, irg); \
115 /* creates a rd constructor for a binop */
116 #define NEW_RD_BINOP(instr) \
118 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
119 ir_node *op1, ir_node *op2, ir_mode *mode) \
122 ir_graph *rem = current_ir_graph; \
123 current_ir_graph = irg; \
124 res = new_bd_##instr(db, block, op1, op2, mode); \
125 current_ir_graph = rem; \
129 /* creates a rd constructor for an unop */
130 #define NEW_RD_UNOP(instr) \
132 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
133 ir_node *op, ir_mode *mode) \
136 ir_graph *rem = current_ir_graph; \
137 current_ir_graph = irg; \
138 res = new_bd_##instr(db, block, op, mode); \
139 current_ir_graph = rem; \
143 /* creates a rd constructor for an divop */
144 #define NEW_RD_DIVOP(instr) \
146 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
147 ir_node *memop, ir_node *op1, ir_node *op2) \
150 ir_graph *rem = current_ir_graph; \
151 current_ir_graph = irg; \
152 res = new_bd_##instr(db, block, memop, op1, op2); \
153 current_ir_graph = rem; \
157 /* creates a d constructor for an binop */
158 #define NEW_D_BINOP(instr) \
160 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
161 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
164 /* creates a d constructor for an unop */
165 #define NEW_D_UNOP(instr) \
167 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
168 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
173 * Constructs a Block with a fixed number of predecessors.
174 * Does not set current_block. Can not be used with automatic
175 * Phi node construction.
178 new_bd_Block(dbg_info *db, int arity, ir_node **in)
181 ir_graph *irg = current_ir_graph;
183 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
184 set_Block_matured(res, 1);
185 set_Block_block_visited(res, 0);
187 /* res->attr.block.exc = exc_normal; */
188 /* res->attr.block.handler_entry = 0; */
189 res->attr.block.dead = 0;
190 res->attr.block.irg = irg;
191 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
192 res->attr.block.in_cg = NULL;
193 res->attr.block.cg_backedge = NULL;
194 res->attr.block.extblk = NULL;
196 IRN_VRFY_IRG(res, irg);
201 new_bd_Start(dbg_info *db, ir_node *block)
204 ir_graph *irg = current_ir_graph;
206 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
207 /* res->attr.start.irg = irg; */
209 IRN_VRFY_IRG(res, irg);
214 new_bd_End(dbg_info *db, ir_node *block)
217 ir_graph *irg = current_ir_graph;
219 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
221 IRN_VRFY_IRG(res, irg);
226 * Creates a Phi node with all predecessors. Calling this constructor
227 * is only allowed if the corresponding block is mature.
230 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
233 ir_graph *irg = current_ir_graph;
237 /* Don't assert that block matured: the use of this constructor is strongly
239 if ( get_Block_matured(block) )
240 assert( get_irn_arity(block) == arity );
242 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
244 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
246 for (i = arity-1; i >= 0; i--)
247 if (get_irn_op(in[i]) == op_Unknown) {
252 if (!has_unknown) res = optimize_node (res);
253 IRN_VRFY_IRG(res, irg);
255 /* Memory Phis in endless loops must be kept alive.
256 As we can't distinguish these easily we keep all of them alive. */
257 if ((res->op == op_Phi) && (mode == mode_M))
258 add_End_keepalive(get_irg_end(irg), res);
263 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
266 ir_graph *irg = current_ir_graph;
268 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
269 res->attr.con.tv = con;
270 set_Const_type(res, tp); /* Call method because of complex assertion. */
271 res = optimize_node (res);
272 assert(get_Const_type(res) == tp);
273 IRN_VRFY_IRG(res, irg);
276 } /* new_bd_Const_type */
279 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
281 ir_graph *irg = current_ir_graph;
283 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
287 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value)
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
292 } /* new_bd_Const_long */
295 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
298 ir_graph *irg = current_ir_graph;
300 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
301 res = optimize_node(res);
302 IRN_VRFY_IRG(res, irg);
307 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332 ir_graph *irg = current_ir_graph;
334 assert(arg->op == op_Cond);
335 arg->attr.cond.kind = fragmentary;
336 arg->attr.cond.default_proj = max_proj;
337 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
355 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in)
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
402 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Jmp(dbg_info *db, ir_node *block)
419 ir_graph *irg = current_ir_graph;
421 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
422 res = optimize_node (res);
423 IRN_VRFY_IRG (res, irg);
428 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt)
431 ir_graph *irg = current_ir_graph;
433 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
434 res = optimize_node (res);
435 IRN_VRFY_IRG (res, irg);
437 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
443 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c)
446 ir_graph *irg = current_ir_graph;
448 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
449 res->attr.cond.kind = dense;
450 res->attr.cond.default_proj = 0;
451 res->attr.cond.pred = COND_JMP_PRED_NONE;
452 res = optimize_node (res);
453 IRN_VRFY_IRG(res, irg);
458 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
459 ir_node *callee, int arity, ir_node **in, ir_type *tp)
464 ir_graph *irg = current_ir_graph;
467 NEW_ARR_A(ir_node *, r_in, r_arity);
470 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
472 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
474 assert((get_unknown_type() == tp) || is_Method_type(tp));
475 set_Call_type(res, tp);
476 res->attr.call.exc.pin_state = op_pin_state_pinned;
477 res->attr.call.callee_arr = NULL;
478 res = optimize_node(res);
479 IRN_VRFY_IRG(res, irg);
484 new_bd_Return(dbg_info *db, ir_node *block,
485 ir_node *store, int arity, ir_node **in)
490 ir_graph *irg = current_ir_graph;
493 NEW_ARR_A (ir_node *, r_in, r_arity);
495 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
496 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
497 res = optimize_node(res);
498 IRN_VRFY_IRG(res, irg);
503 new_bd_Load(dbg_info *db, ir_node *block,
504 ir_node *store, ir_node *adr, ir_mode *mode)
508 ir_graph *irg = current_ir_graph;
512 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
513 res->attr.load.exc.pin_state = op_pin_state_pinned;
514 res->attr.load.load_mode = mode;
515 res->attr.load.volatility = volatility_non_volatile;
516 res = optimize_node(res);
517 IRN_VRFY_IRG(res, irg);
522 new_bd_Store(dbg_info *db, ir_node *block,
523 ir_node *store, ir_node *adr, ir_node *val)
527 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
533 res->attr.store.exc.pin_state = op_pin_state_pinned;
534 res->attr.store.volatility = volatility_non_volatile;
535 res = optimize_node(res);
536 IRN_VRFY_IRG(res, irg);
541 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
542 ir_node *size, ir_type *alloc_type, where_alloc where)
546 ir_graph *irg = current_ir_graph;
550 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
551 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
552 res->attr.alloc.where = where;
553 res->attr.alloc.type = alloc_type;
554 res = optimize_node(res);
555 IRN_VRFY_IRG(res, irg);
560 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
561 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
565 ir_graph *irg = current_ir_graph;
570 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
571 res->attr.free.where = where;
572 res->attr.free.type = free_type;
573 res = optimize_node(res);
574 IRN_VRFY_IRG(res, irg);
579 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
580 int arity, ir_node **in, ir_entity *ent)
585 ir_graph *irg = current_ir_graph;
587 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
590 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
593 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
595 * FIXME: Sel's can select functions which should be of mode mode_P_code.
597 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
598 res->attr.sel.ent = ent;
599 res = optimize_node(res);
600 IRN_VRFY_IRG(res, irg);
605 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
606 symconst_kind symkind, ir_type *tp) {
609 ir_graph *irg = current_ir_graph;
611 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
612 mode = mode_P_data; /* FIXME: can be mode_P_code */
616 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
618 res->attr.symc.num = symkind;
619 res->attr.symc.sym = value;
620 res->attr.symc.tp = tp;
622 res = optimize_node(res);
623 IRN_VRFY_IRG(res, irg);
625 } /* new_bd_SymConst_type */
628 new_bd_Sync(dbg_info *db, ir_node *block)
631 ir_graph *irg = current_ir_graph;
633 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
634 /* no need to call optimize node here, Sync are always created with no predecessors */
635 IRN_VRFY_IRG(res, irg);
640 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
642 ir_node *in[2], *res;
643 ir_graph *irg = current_ir_graph;
647 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
648 res->attr.confirm_cmp = cmp;
649 res = optimize_node (res);
650 IRN_VRFY_IRG(res, irg);
654 /* this function is often called with current_ir_graph unset */
656 new_bd_Unknown(ir_mode *m)
659 ir_graph *irg = current_ir_graph;
661 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
662 res = optimize_node(res);
664 } /* new_bd_Unknown */
667 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call)
671 ir_graph *irg = current_ir_graph;
673 in[0] = get_Call_ptr(call);
674 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
675 /* res->attr.callbegin.irg = irg; */
676 res->attr.callbegin.call = call;
677 res = optimize_node(res);
678 IRN_VRFY_IRG(res, irg);
680 } /* new_bd_CallBegin */
683 new_bd_EndReg(dbg_info *db, ir_node *block)
686 ir_graph *irg = current_ir_graph;
688 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
689 set_irg_end_reg(irg, res);
690 IRN_VRFY_IRG(res, irg);
692 } /* new_bd_EndReg */
695 new_bd_EndExcept(dbg_info *db, ir_node *block)
698 ir_graph *irg = current_ir_graph;
700 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
701 set_irg_end_except(irg, res);
702 IRN_VRFY_IRG (res, irg);
704 } /* new_bd_EndExcept */
707 new_bd_Break(dbg_info *db, ir_node *block)
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
713 res = optimize_node(res);
714 IRN_VRFY_IRG(res, irg);
719 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
723 ir_graph *irg = current_ir_graph;
725 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
726 res->attr.filter.proj = proj;
727 res->attr.filter.in_cg = NULL;
728 res->attr.filter.backedge = NULL;
731 assert(get_Proj_pred(res));
732 assert(get_nodes_block(get_Proj_pred(res)));
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
737 } /* new_bd_Filter */
740 new_bd_Mux(dbg_info *db, ir_node *block,
741 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
745 ir_graph *irg = current_ir_graph;
751 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
760 new_bd_Psi(dbg_info *db, ir_node *block,
761 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
765 ir_graph *irg = current_ir_graph;
768 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
770 for (i = 0; i < arity; ++i) {
772 in[2 * i + 1] = vals[i];
776 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
779 res = optimize_node(res);
780 IRN_VRFY_IRG(res, irg);
785 new_bd_CopyB(dbg_info *db, ir_node *block,
786 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
790 ir_graph *irg = current_ir_graph;
796 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
798 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
799 res->attr.copyb.data_type = data_type;
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
806 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
807 ir_node *objptr, ir_type *type)
811 ir_graph *irg = current_ir_graph;
815 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
816 res->attr.instof.type = type;
817 res = optimize_node(res);
818 IRN_VRFY_IRG(res, irg);
820 } /* new_bd_InstOf */
823 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
827 ir_graph *irg = current_ir_graph;
831 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
832 res = optimize_node(res);
833 IRN_VRFY_IRG(res, irg);
838 new_bd_Bound(dbg_info *db, ir_node *block,
839 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
843 ir_graph *irg = current_ir_graph;
849 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
850 res->attr.bound.exc.pin_state = op_pin_state_pinned;
851 res = optimize_node(res);
852 IRN_VRFY_IRG(res, irg);
857 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node)
860 ir_graph *irg = current_ir_graph;
862 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
863 res = optimize_node(res);
864 IRN_VRFY_IRG(res, irg);
868 /* --------------------------------------------- */
869 /* private interfaces, for professional use only */
870 /* --------------------------------------------- */
872 /* Constructs a Block with a fixed number of predecessors.
873 Does not set current_block. Can not be used with automatic
874 Phi node construction. */
876 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in)
878 ir_graph *rem = current_ir_graph;
881 current_ir_graph = irg;
882 res = new_bd_Block(db, arity, in);
883 current_ir_graph = rem;
889 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
891 ir_graph *rem = current_ir_graph;
894 current_ir_graph = irg;
895 res = new_bd_Start(db, block);
896 current_ir_graph = rem;
902 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
905 ir_graph *rem = current_ir_graph;
907 current_ir_graph = rem;
908 res = new_bd_End(db, block);
909 current_ir_graph = rem;
914 /* Creates a Phi node with all predecessors. Calling this constructor
915 is only allowed if the corresponding block is mature. */
917 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
920 ir_graph *rem = current_ir_graph;
922 current_ir_graph = irg;
923 res = new_bd_Phi(db, block,arity, in, mode);
924 current_ir_graph = rem;
930 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
933 ir_graph *rem = current_ir_graph;
935 current_ir_graph = irg;
936 res = new_bd_Const_type(db, block, mode, con, tp);
937 current_ir_graph = rem;
940 } /* new_rd_Const_type */
943 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
946 ir_graph *rem = current_ir_graph;
948 current_ir_graph = irg;
949 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
950 current_ir_graph = rem;
956 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
958 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
959 } /* new_rd_Const_long */
962 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
965 ir_graph *rem = current_ir_graph;
967 current_ir_graph = irg;
968 res = new_bd_Id(db, block, val, mode);
969 current_ir_graph = rem;
975 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Proj(db, block, arg, mode, proj);
983 current_ir_graph = rem;
989 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
993 ir_graph *rem = current_ir_graph;
995 current_ir_graph = irg;
996 res = new_bd_defaultProj(db, block, arg, max_proj);
997 current_ir_graph = rem;
1000 } /* new_rd_defaultProj */
1003 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1006 ir_graph *rem = current_ir_graph;
1008 current_ir_graph = irg;
1009 res = new_bd_Conv(db, block, op, mode, 0);
1010 current_ir_graph = rem;
1016 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1019 ir_graph *rem = current_ir_graph;
1021 current_ir_graph = irg;
1022 res = new_bd_Cast(db, block, op, to_tp);
1023 current_ir_graph = rem;
1029 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1032 ir_graph *rem = current_ir_graph;
1034 current_ir_graph = irg;
1035 res = new_bd_Tuple(db, block, arity, in);
1036 current_ir_graph = rem;
1039 } /* new_rd_Tuple */
1046 NEW_RD_DIVOP(DivMod)
1059 NEW_RD_BINOP(Borrow)
1062 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1063 ir_node *op1, ir_node *op2)
1066 ir_graph *rem = current_ir_graph;
1068 current_ir_graph = irg;
1069 res = new_bd_Cmp(db, block, op1, op2);
1070 current_ir_graph = rem;
1076 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block)
1079 ir_graph *rem = current_ir_graph;
1081 current_ir_graph = irg;
1082 res = new_bd_Jmp(db, block);
1083 current_ir_graph = rem;
1089 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_IJmp(db, block, tgt);
1096 current_ir_graph = rem;
1102 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Cond(db, block, c);
1109 current_ir_graph = rem;
1115 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1116 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1123 current_ir_graph = rem;
1129 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1130 ir_node *store, int arity, ir_node **in)
1133 ir_graph *rem = current_ir_graph;
1135 current_ir_graph = irg;
1136 res = new_bd_Return(db, block, store, arity, in);
1137 current_ir_graph = rem;
1140 } /* new_rd_Return */
1143 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1144 ir_node *store, ir_node *adr, ir_mode *mode)
1147 ir_graph *rem = current_ir_graph;
1149 current_ir_graph = irg;
1150 res = new_bd_Load(db, block, store, adr, mode);
1151 current_ir_graph = rem;
1157 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1158 ir_node *store, ir_node *adr, ir_node *val)
1161 ir_graph *rem = current_ir_graph;
1163 current_ir_graph = irg;
1164 res = new_bd_Store(db, block, store, adr, val);
1165 current_ir_graph = rem;
1168 } /* new_rd_Store */
1171 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1172 ir_node *size, ir_type *alloc_type, where_alloc where)
1175 ir_graph *rem = current_ir_graph;
1177 current_ir_graph = irg;
1178 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1179 current_ir_graph = rem;
1182 } /* new_rd_Alloc */
1185 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1186 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1189 ir_graph *rem = current_ir_graph;
1191 current_ir_graph = irg;
1192 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1193 current_ir_graph = rem;
1199 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1200 ir_node *store, ir_node *objptr, ir_entity *ent)
1203 ir_graph *rem = current_ir_graph;
1205 current_ir_graph = irg;
1206 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1207 current_ir_graph = rem;
1210 } /* new_rd_simpleSel */
1213 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1214 int arity, ir_node **in, ir_entity *ent)
1217 ir_graph *rem = current_ir_graph;
1219 current_ir_graph = irg;
1220 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1221 current_ir_graph = rem;
1227 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1228 symconst_kind symkind, ir_type *tp)
1231 ir_graph *rem = current_ir_graph;
1233 current_ir_graph = irg;
1234 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1235 current_ir_graph = rem;
1238 } /* new_rd_SymConst_type */
1241 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1242 symconst_kind symkind)
1244 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1246 } /* new_rd_SymConst */
1248 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp)
1250 symconst_symbol sym;
1251 sym.entity_p = symbol;
1252 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1253 } /* new_rd_SymConst_addr_ent */
1255 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp)
1257 symconst_symbol sym;
1258 sym.entity_p = symbol;
1259 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1260 } /* new_rd_SymConst_ofs_ent */
1262 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1263 symconst_symbol sym;
1264 sym.ident_p = symbol;
1265 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1266 } /* new_rd_SymConst_addr_name */
1268 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1269 symconst_symbol sym;
1270 sym.type_p = symbol;
1271 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1272 } /* new_rd_SymConst_type_tag */
1274 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1275 symconst_symbol sym;
1276 sym.type_p = symbol;
1277 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1278 } /* new_rd_SymConst_size */
1280 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1281 symconst_symbol sym;
1282 sym.type_p = symbol;
1283 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1284 } /* new_rd_SymConst_align */
1287 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1290 ir_graph *rem = current_ir_graph;
1293 current_ir_graph = irg;
1294 res = new_bd_Sync(db, block);
1295 current_ir_graph = rem;
1297 for (i = 0; i < arity; ++i)
1298 add_Sync_pred(res, in[i]);
1304 new_rd_Bad(ir_graph *irg) {
1305 return get_irg_bad(irg);
1309 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1312 ir_graph *rem = current_ir_graph;
1314 current_ir_graph = irg;
1315 res = new_bd_Confirm(db, block, val, bound, cmp);
1316 current_ir_graph = rem;
1319 } /* new_rd_Confirm */
1321 /* this function is often called with current_ir_graph unset */
1323 new_rd_Unknown(ir_graph *irg, ir_mode *m)
1326 ir_graph *rem = current_ir_graph;
1328 current_ir_graph = irg;
1329 res = new_bd_Unknown(m);
1330 current_ir_graph = rem;
1333 } /* new_rd_Unknown */
1336 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1339 ir_graph *rem = current_ir_graph;
1341 current_ir_graph = irg;
1342 res = new_bd_CallBegin(db, block, call);
1343 current_ir_graph = rem;
1346 } /* new_rd_CallBegin */
1349 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
1353 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1354 set_irg_end_reg(irg, res);
1355 IRN_VRFY_IRG(res, irg);
1357 } /* new_rd_EndReg */
1360 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
1364 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1365 set_irg_end_except(irg, res);
1366 IRN_VRFY_IRG (res, irg);
1368 } /* new_rd_EndExcept */
1371 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block)
1374 ir_graph *rem = current_ir_graph;
1376 current_ir_graph = irg;
1377 res = new_bd_Break(db, block);
1378 current_ir_graph = rem;
1381 } /* new_rd_Break */
1384 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1388 ir_graph *rem = current_ir_graph;
1390 current_ir_graph = irg;
1391 res = new_bd_Filter(db, block, arg, mode, proj);
1392 current_ir_graph = rem;
1395 } /* new_rd_Filter */
1398 new_rd_NoMem(ir_graph *irg) {
1399 return get_irg_no_mem(irg);
1400 } /* new_rd_NoMem */
1403 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1404 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1407 ir_graph *rem = current_ir_graph;
1409 current_ir_graph = irg;
1410 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1411 current_ir_graph = rem;
1417 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1418 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1421 ir_graph *rem = current_ir_graph;
1423 current_ir_graph = irg;
1424 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1425 current_ir_graph = rem;
1430 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1431 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1434 ir_graph *rem = current_ir_graph;
1436 current_ir_graph = irg;
1437 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1438 current_ir_graph = rem;
1441 } /* new_rd_CopyB */
1444 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1445 ir_node *objptr, ir_type *type)
1448 ir_graph *rem = current_ir_graph;
1450 current_ir_graph = irg;
1451 res = new_bd_InstOf(db, block, store, objptr, type);
1452 current_ir_graph = rem;
1455 } /* new_rd_InstOf */
1458 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1461 ir_graph *rem = current_ir_graph;
1463 current_ir_graph = irg;
1464 res = new_bd_Raise(db, block, store, obj);
1465 current_ir_graph = rem;
1468 } /* new_rd_Raise */
1470 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1471 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1474 ir_graph *rem = current_ir_graph;
1476 current_ir_graph = irg;
1477 res = new_bd_Bound(db, block, store, idx, lower, upper);
1478 current_ir_graph = rem;
1481 } /* new_rd_Bound */
1483 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node)
1486 ir_graph *rem = current_ir_graph;
1488 current_ir_graph = irg;
1489 res = new_bd_Pin(db, block, node);
1490 current_ir_graph = rem;
1495 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1496 return new_rd_Block(NULL, irg, arity, in);
1498 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1499 return new_rd_Start(NULL, irg, block);
1501 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1502 return new_rd_End(NULL, irg, block);
1504 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1505 return new_rd_Jmp(NULL, irg, block);
1507 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1508 return new_rd_IJmp(NULL, irg, block, tgt);
1510 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1511 return new_rd_Cond(NULL, irg, block, c);
1513 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1514 ir_node *store, int arity, ir_node **in) {
1515 return new_rd_Return(NULL, irg, block, store, arity, in);
1517 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1518 ir_mode *mode, tarval *con) {
1519 return new_rd_Const(NULL, irg, block, mode, con);
1521 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1522 ir_mode *mode, long value) {
1523 return new_rd_Const_long(NULL, irg, block, mode, value);
1525 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1526 ir_mode *mode, tarval *con, ir_type *tp) {
1527 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1529 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1530 symconst_symbol value, symconst_kind symkind) {
1531 return new_rd_SymConst(NULL, irg, block, value, symkind);
1533 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1534 ir_node *objptr, ir_entity *ent) {
1535 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1537 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1538 ir_node *objptr, int n_index, ir_node **index,
1540 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1542 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1543 ir_node *callee, int arity, ir_node **in,
1545 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1547 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1548 ir_node *op1, ir_node *op2, ir_mode *mode) {
1549 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1551 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1552 ir_node *op1, ir_node *op2, ir_mode *mode) {
1553 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1555 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1556 ir_node *op, ir_mode *mode) {
1557 return new_rd_Minus(NULL, irg, block, op, mode);
1559 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1560 ir_node *op1, ir_node *op2, ir_mode *mode) {
1561 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1563 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1564 ir_node *memop, ir_node *op1, ir_node *op2) {
1565 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1567 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1568 ir_node *memop, ir_node *op1, ir_node *op2) {
1569 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1571 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1572 ir_node *memop, ir_node *op1, ir_node *op2) {
1573 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1575 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1576 ir_node *memop, ir_node *op1, ir_node *op2) {
1577 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1579 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_mode *mode) {
1581 return new_rd_Abs(NULL, irg, block, op, mode);
1583 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2, ir_mode *mode) {
1585 return new_rd_And(NULL, irg, block, op1, op2, mode);
1587 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1588 ir_node *op1, ir_node *op2, ir_mode *mode) {
1589 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1591 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1592 ir_node *op1, ir_node *op2, ir_mode *mode) {
1593 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1595 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_mode *mode) {
1597 return new_rd_Not(NULL, irg, block, op, mode);
1599 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1600 ir_node *op, ir_node *k, ir_mode *mode) {
1601 return new_rd_Shl(NULL, irg, block, op, k, mode);
1603 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1604 ir_node *op, ir_node *k, ir_mode *mode) {
1605 return new_rd_Shr(NULL, irg, block, op, k, mode);
1607 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1608 ir_node *op, ir_node *k, ir_mode *mode) {
1609 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1611 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1612 ir_node *op, ir_node *k, ir_mode *mode) {
1613 return new_rd_Rot(NULL, irg, block, op, k, mode);
1615 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1616 ir_node *op, ir_node *k, ir_mode *mode) {
1617 return new_rd_Carry(NULL, irg, block, op, k, mode);
1619 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1620 ir_node *op, ir_node *k, ir_mode *mode) {
1621 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1623 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1624 ir_node *op1, ir_node *op2) {
1625 return new_rd_Cmp(NULL, irg, block, op1, op2);
1627 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1628 ir_node *op, ir_mode *mode) {
1629 return new_rd_Conv(NULL, irg, block, op, mode);
1631 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1632 return new_rd_Cast(NULL, irg, block, op, to_tp);
1634 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1635 ir_node **in, ir_mode *mode) {
1636 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1638 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1639 ir_node *store, ir_node *adr, ir_mode *mode) {
1640 return new_rd_Load(NULL, irg, block, store, adr, mode);
1642 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1643 ir_node *store, ir_node *adr, ir_node *val) {
1644 return new_rd_Store(NULL, irg, block, store, adr, val);
1646 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1647 ir_node *size, ir_type *alloc_type, where_alloc where) {
1648 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1650 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1651 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1652 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1654 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1655 return new_rd_Sync(NULL, irg, block, arity, in);
1657 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1658 ir_mode *mode, long proj) {
1659 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1661 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1663 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1665 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1666 int arity, ir_node **in) {
1667 return new_rd_Tuple(NULL, irg, block, arity, in );
1669 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1670 ir_node *val, ir_mode *mode) {
1671 return new_rd_Id(NULL, irg, block, val, mode);
1673 ir_node *new_r_Bad (ir_graph *irg) {
1674 return new_rd_Bad(irg);
1676 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1677 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1679 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1680 return new_rd_Unknown(irg, m);
1682 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1683 return new_rd_CallBegin(NULL, irg, block, callee);
1685 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1686 return new_rd_EndReg(NULL, irg, block);
1688 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1689 return new_rd_EndExcept(NULL, irg, block);
1691 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1692 return new_rd_Break(NULL, irg, block);
1694 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1695 ir_mode *mode, long proj) {
1696 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1698 ir_node *new_r_NoMem (ir_graph *irg) {
1699 return new_rd_NoMem(irg);
1701 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1702 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1703 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1705 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1706 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1707 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1709 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1710 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1711 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1713 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1715 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1717 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1718 ir_node *store, ir_node *obj) {
1719 return new_rd_Raise(NULL, irg, block, store, obj);
1721 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1722 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1723 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1725 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1726 return new_rd_Pin(NULL, irg, block, node);
1729 /** ********************/
1730 /** public interfaces */
1731 /** construction tools */
1735 * - create a new Start node in the current block
1737 * @return s - pointer to the created Start node
1742 new_d_Start(dbg_info *db)
1746 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1747 op_Start, mode_T, 0, NULL);
1748 /* res->attr.start.irg = current_ir_graph; */
1750 res = optimize_node(res);
1751 IRN_VRFY_IRG(res, current_ir_graph);
1756 new_d_End(dbg_info *db)
1759 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1760 op_End, mode_X, -1, NULL);
1761 res = optimize_node(res);
1762 IRN_VRFY_IRG(res, current_ir_graph);
1767 /* Constructs a Block with a fixed number of predecessors.
1768 Does set current_block. Can be used with automatic Phi
1769 node construction. */
1771 new_d_Block(dbg_info *db, int arity, ir_node **in)
1775 int has_unknown = 0;
1777 res = new_bd_Block(db, arity, in);
1779 /* Create and initialize array for Phi-node construction. */
1780 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1781 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1782 current_ir_graph->n_loc);
1783 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1786 for (i = arity-1; i >= 0; i--)
1787 if (get_irn_op(in[i]) == op_Unknown) {
1792 if (!has_unknown) res = optimize_node(res);
1793 current_ir_graph->current_block = res;
1795 IRN_VRFY_IRG(res, current_ir_graph);
1800 /* ***********************************************************************/
1801 /* Methods necessary for automatic Phi node creation */
1803 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1804 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1805 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1806 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1808 Call Graph: ( A ---> B == A "calls" B)
1810 get_value mature_immBlock
1818 get_r_value_internal |
1822 new_rd_Phi0 new_rd_Phi_in
1824 * *************************************************************************** */
1826 /** Creates a Phi node with 0 predecessors. */
1827 static INLINE ir_node *
1828 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
1832 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1833 IRN_VRFY_IRG(res, irg);
1837 /* There are two implementations of the Phi node construction. The first
1838 is faster, but does not work for blocks with more than 2 predecessors.
1839 The second works always but is slower and causes more unnecessary Phi
1841 Select the implementations by the following preprocessor flag set in
1843 #if USE_FAST_PHI_CONSTRUCTION
1845 /* This is a stack used for allocating and deallocating nodes in
1846 new_rd_Phi_in. The original implementation used the obstack
1847 to model this stack, now it is explicit. This reduces side effects.
#if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in reuse stack on the heap.
   NOTE(review): the malloc result is used unchecked -- confirm the
   project's OOM policy before relying on this. */
new_Phi_in_stack(void) {

  res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));

  res->stack = NEW_ARR_F (ir_node *, 0);

} /* new_Phi_in_stack */

/* Frees the stack's flexible array; the stack object itself is released
   by the (elided) remainder of this function. */
free_Phi_in_stack(Phi_in_stack *s) {
  DEL_ARR_F(s->stack);
} /* free_Phi_in_stack */

/* Returns a discarded Phi node to the reuse stack: appends when the
   array is full at the current position, otherwise overwrites the slot
   (the two statements below are alternative branches; the else was lost
   in extraction), then bumps the stack position. */
free_to_Phi_in_stack(ir_node *phi) {
  if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
      current_ir_graph->Phi_in_stack->pos)
    ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
    current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;

  (current_ir_graph->Phi_in_stack->pos)++;
} /* free_to_Phi_in_stack */
/* Pops a reusable Phi node from the Phi_in stack, or allocates a new one
   when the stack is empty; the reused node is re-initialized with the
   given arity and in-array. */
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
                               int arity, ir_node **in) {
  ir_node **stack = current_ir_graph->Phi_in_stack->stack;
  int pos = current_ir_graph->Phi_in_stack->pos;

    /* We need to allocate a new node */
    /* NOTE(review): `db` is not a parameter of this function -- verify
       against the full source whether this should be NULL. */
    res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
    res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

    /* reuse the old node and initialize it again. */

    assert (res->kind == k_ir_node);
    assert (res->op == op_Phi);
    assert (arity >= 0);
    /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
    res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));

    /* Copy the new predecessors past the block slot (in[0]). */
    memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

    (current_ir_graph->Phi_in_stack->pos)--;

} /* alloc_or_pop_from_Phi_in_stack */
#endif /* USE_EXPLICIT_PHI_IN_STACK */
1913 * Creates a Phi node with a given, fixed array **in of predecessors.
1914 * If the Phi node is unnecessary, as the same value reaches the block
1915 * through all control flow paths, it is eliminated and the value
1916 * returned directly. This constructor is only intended for use in
1917 * the automatic Phi node generation triggered by get_value or mature.
1918 * The implementation is quite tricky and depends on the fact, that
1919 * the nodes are allocated on a stack:
1920 * The in array contains predecessors and NULLs. The NULLs appear,
1921 * if get_r_value_internal, that computed the predecessors, reached
1922 * the same block on two paths. In this case the same value reaches
1923 * this block on both paths, there is no definition in between. We need
 * not allocate a Phi where these paths merge, but we have to communicate
1925 * this fact to the caller. This happens by returning a pointer to the
1926 * node the caller _will_ allocate. (Yes, we predict the address. We can
1927 * do so because the nodes are allocated on the obstack.) The caller then
1928 * finds a pointer to itself and, when this routine is called again,
1929 * eliminates itself.
/* Creates a Phi node with the given fixed in-array; may instead return
   the single reaching value, or a *predicted* node address (the next
   obstack allocation) when the same value reaches the block on all
   paths -- see the comment block above. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

  ir_node *res, *known;

  /* Allocate a new node on the obstack.  This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node!  If the in array contains NULLs, there will be
     missing predecessors in the returned node.  Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
  /* (#else branch; conditional lines elided by extraction) */
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* The in-array can contain NULLs.  These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path.  The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
    /* (#else branch: undo the obstack allocation directly) */
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);

  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

  /* return the pointer to the Phi node.  This node might be deallocated! */
} /* new_rd_Phi_in */
1993 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1996 * Allocates and returns this node. The routine called to allocate the
1997 * node might optimize it away and return a real value, or even a pointer
1998 * to a deallocated Phi node on top of the obstack!
1999 * This function is called with an in-array of proper size.
/* Collects the Phi operands from all predecessor blocks into nin and
   builds the Phi for local variable `pos` via new_rd_Phi_in; records the
   result in the block's graph_arr unless a value is already present. */
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)

  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node.  If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned.  We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables.  Attention: this might be a pointer to
     a node, that later will be allocated!!! See new_rd_Phi_in().
     If this is called in mature, after some set_value() in the same block,
     the proper value must not be overwritten:
       get_value       (makes Phi0, puts it into graph_arr)
       set_value       (overwrites Phi0 in graph_arr)
       mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
                        the set_value) */
  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
    /* printf(" value already computed by %s\n",
       get_id_str(block->attr.block.graph_arr[pos]->op->name));  */
/**
 * This function returns the last definition of a variable.  In case
 * this variable was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case.  Straight forward we would create a Phi before
          starting the computation of its predecessors.  In this case we will
          find a Phi here in any case.  The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi.  @@@
        There is no simple check for the second subcase.  Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyways, the basic situation is the same:  we reached a block
        on two paths without finding a definition of the value:  No Phi
        nodes are needed on both paths.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example. */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock
       later.
       The Phi0 has to remember the pos of its internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
                      tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2159 it starts the recursion. This causes an Id at the entry of
2160 every block that has no definition of the value! **/
2162 #if USE_EXPLICIT_PHI_IN_STACK
2164 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2165 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Slow-but-general Phi constructor: builds a Phi from the in-array,
   skipping Ids and Bads, and collapses to the single reaching value when
   the Phi is unnecessary.  phi0 is the placeholder this Phi will replace
   (or NULL); self references through phi0 are redirected to the new node. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
              ir_node **in, int ins, ir_node *phi0)

  ir_node *res, *known;

  /* Allocate a new node on the obstack.  The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the
     Phi node merges the same definition on several paths and therefore
     is not needed.  Don't consider Bad nodes! */
  for (i=0; i < ins; ++i)

    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis:  We can't detect them yet properly, as
       they still refer to the Phi0 they will replace.  So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
    /* Undo the speculative allocation of res. */
    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi node may become superfluous. */
      res = optimize_in_place_2(known);
        exchange(known, res);

      /* An undefined value, e.g., in unreachable code. */

  res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
  IRN_VRFY_IRG(res, irg);
  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(get_irg_end(irg), res);

} /* new_rd_Phi_in */
2235 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2237 #if PRECISE_EXC_CONTEXT
2239 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2242 * Construct a new frag_array for node n.
2243 * Copy the content from the current graph_arr of the corresponding block:
2244 * this is the current state.
2245 * Set ProjM(n) as current memory state.
2246 * Further the last entry in frag_arr of current block points to n. This
2247 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds a frag_arr for fragile op n: a snapshot of the current block's
   graph_arr (all local values) with arr[0] set to the exception memory
   Proj of n.  The last graph_arr slot is made to point to n, forming the
   chain block->last_frag_op -> ... -> first_frag_op. */
static INLINE ir_node ** new_frag_arr(ir_node *n)

  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
         sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
  else if (get_irn_op(n) == op_CopyB)
    arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
    /* (else branch: all remaining frag ops share the same M proj number,
       verified by the asserts below) */
    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  &&
           (pn_Quot_M == pn_Bound_M));
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

  /* Chain: remember n as the block's most recent fragile op. */
  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;

} /* new_frag_arr */
2283 * Returns the frag_arr from a node.
/* Returns the frag_arr stored in the attributes of fragile op n.
   NOTE(review): the case labels were lost in extraction -- presumably
   Call/Alloc/Load/Store plus a default for the generic except attr;
   verify against the full source. */
static INLINE ir_node **get_frag_arr(ir_node *n) {
  switch (get_irn_opcode(n)) {
    return n->attr.call.exc.frag_arr;
    return n->attr.alloc.exc.frag_arr;
    return n->attr.load.exc.frag_arr;
    return n->attr.store.exc.frag_arr;
    return n->attr.except.frag_arr;
} /* get_frag_arr */
/* Propagates value val for local `pos` through the chain of frag_arrs:
   sets the slot if still empty, then follows the last-frag-op link to
   the next frag_arr.  Two variants appear below -- a recursive one and a
   bounded iterative one; the separating #if/#else lines were lost in
   extraction. */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
  /* Recursive variant. */
  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);

  /* Iterative variant, bounded to 1000 chain links. */
  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;

    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);

  assert(0 && "potential endless recursion");
} /* set_frag_value */
/* Like get_r_value_internal, but looks the value up in the frag_arr of
   fragile op cfOp first (precise exception context): the value valid
   *before* the potentially-throwing op, not the block's final value. */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {

  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];

  if (block->attr.block.graph_arr[pos]) {
    /* There was a set_value() after the cfOp and no get_value() before that
       set_value().  We must build a Phi node now. */
    if (block->attr.block.matured) {
      int ins = get_irn_arity(block);

      NEW_ARR_A (ir_node *, nin, ins);
      res = phi_merge(block, pos, mode, nin, ins);
      /* (else branch: block not mature, fall back to a Phi0 placeholder) */
      res = new_rd_Phi0 (current_ir_graph, block, mode);
      res->attr.phi0_pos = pos;
      res->link = block->link;

    /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
       but this should be better: (remove comment if this works) */
    /* It's a Phi, we can write this into all graph_arrs with NULL */
    set_frag_value(block->attr.block.graph_arr, pos, res);

    res = get_r_value_internal(block, pos, mode);
    set_frag_value(block->attr.block.graph_arr, pos, res);

} /* get_r_frag_value_internal */
2363 #endif /* PRECISE_EXC_CONTEXT */
2366 * Computes the predecessors for the real phi node, and then
2367 * allocates and returns this node. The routine called to allocate the
2368 * node might optimize it away and return a real value.
2369 * This function must be called with an in-array of proper size.
/* Computes the predecessors for the real Phi node of local `pos`, then
   allocates it via new_rd_Phi_in (which may optimize it away).  A Phi0
   placeholder is installed first to break recursion through loops. */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)

  ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr as there a later value is remembered. */
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is no good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language.  Sadly the only help we can give
         here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp).  In this case we call the function needlessly, eventually
         generating a nonexistent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
         before recurring. */
      if (default_initialize_local_variable) {
        ir_node *rem = get_cur_block();

        set_cur_block(block);
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
        /* (else branch: no callback registered, use a Bad constant) */
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
      /* (else: not the start block -- install the Phi0 placeholder) */
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context()) {
        /* Set graph_arr for fragile ops.  Also here we should break recursion.
           We could choose a cyclic path through an cfop.  But the recursion would
           break at some point. */
        set_frag_value(block->attr.block.graph_arr, pos, phi0);

  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);

    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();

    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
        /* (else: ordinary predecessor, take the block's value) */
        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
      /* (else: predecessor block is Bad) */
      nin[i-1] = new_Bad();

  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal.  In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */
    phi0_all = block->attr.block.graph_arr[pos];
    if (!((get_irn_op(phi0_all) == op_Phi) &&
          (get_irn_arity(phi0_all) == 0)   &&
          (get_nodes_block(phi0_all) == block)))

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
    exchange(phi0, res);
    block->attr.block.graph_arr[pos] = res;
    /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
       only an optimization. */
2483 * This function returns the last definition of a variable. In case
2484 * this variable was last defined in a previous block, Phi nodes are
2485 * inserted. If the part of the firm graph containing the definition
2486 * is not yet constructed, a dummy Phi node is returned.
2489 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2492 /* There are 4 cases to treat.
2494 1. The block is not mature and we visit it the first time. We can not
2495 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2496 predecessors is returned. This node is added to the linked list (field
2497 "link") of the containing block to be completed when this block is
2498 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2501 2. The value is already known in this block, graph_arr[pos] is set and we
2502 visit the block the first time. We can return the value without
2503 creating any new nodes.
2505 3. The block is mature and we visit it the first time. A Phi node needs
2506 to be created (phi_merge). If the Phi is not needed, as all it's
2507 operands are the same value reaching the block through different
2508 paths, it's optimized away and the value itself is returned.
2510 4. The block is mature, and we visit it the second time. Now two
2511 subcases are possible:
2512 * The value was computed completely the last time we were here. This
2513 is the case if there is no loop. We can return the proper value.
2514 * The recursion that visited this node and set the flag did not
2515 return yet. We are computing a value in a loop and need to
2516 break the recursion. This case only happens if we visited
2517 the same block with phi_merge before, which inserted a Phi0.
2518 So we return the Phi0.
2521 /* case 4 -- already visited. */
2522 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2523 /* As phi_merge allocates a Phi0 this value is always defined. Here
2524 is the critical difference of the two algorithms. */
2525 assert(block->attr.block.graph_arr[pos]);
2526 return block->attr.block.graph_arr[pos];
2529 /* visited the first time */
2530 set_irn_visited(block, get_irg_visited(current_ir_graph));
2532 /* Get the local valid value */
2533 res = block->attr.block.graph_arr[pos];
2535 /* case 2 -- If the value is actually computed, return it. */
2536 if (res) { return res; };
2538 if (block->attr.block.matured) { /* case 3 */
2540 /* The Phi has the same amount of ins as the corresponding block. */
2541 int ins = get_irn_arity(block);
2543 NEW_ARR_A (ir_node *, nin, ins);
2545 /* Phi merge collects the predecessors and then creates a node. */
2546 res = phi_merge (block, pos, mode, nin, ins);
2548 } else { /* case 1 */
2549 /* The block is not mature, we don't know how many in's are needed. A Phi
2550 with zero predecessors is created. Such a Phi node is called Phi0
2551 node. The Phi0 is then added to the list of Phi0 nodes in this block
2552 to be matured by mature_immBlock later.
2553 The Phi0 has to remember the pos of it's internal value. If the real
2554 Phi is computed, pos is used to update the array with the local
2556 res = new_rd_Phi0 (current_ir_graph, block, mode);
2557 res->attr.phi0_pos = pos;
2558 res->link = block->link;
2562 /* If we get here, the frontend missed a use-before-definition error */
2565 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2566 assert (mode->code >= irm_F && mode->code <= irm_P);
2567 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2568 get_mode_null(mode));
2571 /* The local valid value is available now. */
2572 block->attr.block.graph_arr[pos] = res;
2575 } /* get_r_value_internal */
2577 #endif /* USE_FAST_PHI_CONSTRUCTION */
2579 /* ************************************************************************** */
2582 * Finalize a Block node, when all control flows are known.
2583 * Acceptable parameters are only Block nodes.
2586 mature_immBlock(ir_node *block)
2592 assert (get_irn_opcode(block) == iro_Block);
2593 /* @@@ should be commented in
2594 assert (!get_Block_matured(block) && "Block already matured"); */
2596 if (!get_Block_matured(block)) {
2597 ins = ARR_LEN (block->in)-1;
2598 /* Fix block parameters */
2599 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2601 /* An array for building the Phi nodes. */
2602 NEW_ARR_A (ir_node *, nin, ins);
2604 /* Traverse a chain of Phi nodes attached to this block and mature
2606 for (n = block->link; n; n = next) {
2607 inc_irg_visited(current_ir_graph);
2609 exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2612 block->attr.block.matured = 1;
2614 /* Now, as the block is a finished firm node, we can optimize it.
2615 Since other nodes have been allocated since the block was created
2616 we can not free the node on the obstack. Therefore we have to call
2618 Unfortunately the optimization does not change a lot, as all allocated
2619 nodes refer to the unoptimized node.
2620 We can call _2, as global cse has no effect on blocks. */
2621 block = optimize_in_place_2(block);
2622 IRN_VRFY_IRG(block, current_ir_graph);
2624 } /* mature_immBlock */
2627 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2628 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2632 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2633 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2637 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2638 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2639 } /* new_d_Const_long */
2642 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2643 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2644 } /* new_d_Const_type */
2648 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2649 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2653 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2654 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2658 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2660 assert(arg->op == op_Cond);
2661 arg->attr.cond.kind = fragmentary;
2662 arg->attr.cond.default_proj = max_proj;
2663 res = new_Proj(arg, mode_X, max_proj);
2665 } /* new_d_defaultProj */
2668 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2669 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2673 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2674 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2675 } /* new_d_strictConv */
2678 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2679 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2683 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2684 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2693 * Allocate the frag array.
2695 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2696 if (get_opt_precise_exc_context()) {
2697 if ((current_ir_graph->phase_state == phase_building) &&
2698 (get_irn_op(res) == op) && /* Could be optimized away. */
2699 !*frag_store) /* Could be a cse where the arr is already set. */ {
2700 *frag_store = new_frag_arr(res);
2703 } /* allocate_frag_arr */
2706 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2708 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2);
2709 res->attr.except.pin_state = op_pin_state_pinned;
2710 #if PRECISE_EXC_CONTEXT
2711 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2718 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2720 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2);
2721 res->attr.except.pin_state = op_pin_state_pinned;
2722 #if PRECISE_EXC_CONTEXT
2723 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2727 } /* new_d_DivMod */
2730 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2733 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2);
2734 res->attr.except.pin_state = op_pin_state_pinned;
2735 #if PRECISE_EXC_CONTEXT
2736 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2743 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2745 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2);
2746 res->attr.except.pin_state = op_pin_state_pinned;
2747 #if PRECISE_EXC_CONTEXT
2748 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2767 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2768 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2772 new_d_Jmp(dbg_info *db) {
2773 return new_bd_Jmp(db, current_ir_graph->current_block);
2777 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2778 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2782 new_d_Cond(dbg_info *db, ir_node *c) {
2783 return new_bd_Cond(db, current_ir_graph->current_block, c);
2787 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2791 res = new_bd_Call(db, current_ir_graph->current_block,
2792 store, callee, arity, in, tp);
2793 #if PRECISE_EXC_CONTEXT
2794 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2801 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2802 return new_bd_Return(db, current_ir_graph->current_block,
2804 } /* new_d_Return */
2807 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2809 res = new_bd_Load(db, current_ir_graph->current_block,
2811 #if PRECISE_EXC_CONTEXT
2812 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2819 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2821 res = new_bd_Store(db, current_ir_graph->current_block,
2823 #if PRECISE_EXC_CONTEXT
2824 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2831 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2835 res = new_bd_Alloc(db, current_ir_graph->current_block,
2836 store, size, alloc_type, where);
2837 #if PRECISE_EXC_CONTEXT
2838 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2845 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2846 ir_node *size, ir_type *free_type, where_alloc where)
2848 return new_bd_Free(db, current_ir_graph->current_block,
2849 store, ptr, size, free_type, where);
2853 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2854 /* GL: objptr was called frame before. Frame was a bad choice for the name
2855 as the operand could as well be a pointer to a dynamic object. */
2857 return new_bd_Sel(db, current_ir_graph->current_block,
2858 store, objptr, 0, NULL, ent);
2859 } /* new_d_simpleSel */
2862 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel)
2864 return new_bd_Sel(db, current_ir_graph->current_block,
2865 store, objptr, n_index, index, sel);
2869 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2871 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2873 } /* new_d_SymConst_type */
2876 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind)
2878 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2879 value, kind, firm_unknown_type);
2880 } /* new_d_SymConst */
2883 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2884 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2890 return _new_d_Bad();
2894 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2895 return new_bd_Confirm(db, current_ir_graph->current_block,
2897 } /* new_d_Confirm */
2900 new_d_Unknown(ir_mode *m) {
2901 return new_bd_Unknown(m);
2902 } /* new_d_Unknown */
2905 new_d_CallBegin(dbg_info *db, ir_node *call) {
2906 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2907 } /* new_d_CallBegin */
2910 new_d_EndReg(dbg_info *db) {
2911 return new_bd_EndReg(db, current_ir_graph->current_block);
2912 } /* new_d_EndReg */
2915 new_d_EndExcept(dbg_info *db) {
2916 return new_bd_EndExcept(db, current_ir_graph->current_block);
2917 } /* new_d_EndExcept */
2920 new_d_Break(dbg_info *db) {
2921 return new_bd_Break(db, current_ir_graph->current_block);
2925 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2926 return new_bd_Filter (db, current_ir_graph->current_block,
2928 } /* new_d_Filter */
2931 (new_d_NoMem)(void) {
2932 return _new_d_NoMem();
2936 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2937 ir_node *ir_true, ir_mode *mode) {
2938 return new_bd_Mux(db, current_ir_graph->current_block,
2939 sel, ir_false, ir_true, mode);
2943 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2944 return new_bd_Psi(db, current_ir_graph->current_block,
2945 arity, conds, vals, mode);
2948 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2949 ir_node *dst, ir_node *src, ir_type *data_type) {
2951 res = new_bd_CopyB(db, current_ir_graph->current_block,
2952 store, dst, src, data_type);
2953 #if PRECISE_EXC_CONTEXT
2954 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2960 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2961 return new_bd_InstOf(db, current_ir_graph->current_block,
2962 store, objptr, type);
2963 } /* new_d_InstOf */
2966 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2967 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2970 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2971 ir_node *idx, ir_node *lower, ir_node *upper) {
2973 res = new_bd_Bound(db, current_ir_graph->current_block,
2974 store, idx, lower, upper);
2975 #if PRECISE_EXC_CONTEXT
2976 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2982 new_d_Pin(dbg_info *db, ir_node *node) {
2983 return new_bd_Pin(db, current_ir_graph->current_block, node);
2986 /* ********************************************************************* */
2987 /* Comfortable interface with automatic Phi node construction. */
/* (Also uses constructors of the other interfaces, except new_Block.) */
2989 /* ********************************************************************* */
2991 /* Block construction */
2992 /* immature Block without predecessors */
2993 ir_node *new_d_immBlock(dbg_info *db) {
2996 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2997 /* creates a new dynamic in-array as length of in is -1 */
2998 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2999 current_ir_graph->current_block = res;
3000 res->attr.block.matured = 0;
3001 res->attr.block.dead = 0;
3002 /* res->attr.block.exc = exc_normal; */
3003 /* res->attr.block.handler_entry = 0; */
3004 res->attr.block.irg = current_ir_graph;
3005 res->attr.block.backedge = NULL;
3006 res->attr.block.in_cg = NULL;
3007 res->attr.block.cg_backedge = NULL;
3008 set_Block_block_visited(res, 0);
3010 /* Create and initialize array for Phi-node construction. */
3011 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3012 current_ir_graph->n_loc);
3013 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3015 /* Immature block may not be optimized! */
3016 IRN_VRFY_IRG(res, current_ir_graph);
3019 } /* new_d_immBlock */
3022 new_immBlock(void) {
3023 return new_d_immBlock(NULL);
3024 } /* new_immBlock */
3026 /* add an edge to a jmp/control flow node */
3028 add_immBlock_pred(ir_node *block, ir_node *jmp)
3030 if (block->attr.block.matured) {
3031 assert(0 && "Error: Block already matured!\n");
3034 assert(jmp != NULL);
3035 ARR_APP1(ir_node *, block->in, jmp);
3037 } /* add_immBlock_pred */
3039 /* changing the current block */
3041 set_cur_block(ir_node *target) {
3042 current_ir_graph->current_block = target;
3043 } /* set_cur_block */
3045 /* ************************ */
3046 /* parameter administration */
3048 /* get a value from the parameter array from the current block by its index */
3050 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3051 ir_graph *irg = current_ir_graph;
3052 assert(get_irg_phase_state(irg) == phase_building);
3053 inc_irg_visited(irg);
3055 return get_r_value_internal(irg->current_block, pos + 1, mode);
3058 /* get a value from the parameter array from the current block by its index */
3060 get_value(int pos, ir_mode *mode) {
3061 return get_d_value(NULL, pos, mode);
3064 /* set a value at position pos in the parameter array from the current block */
3066 set_value(int pos, ir_node *value) {
3067 ir_graph *irg = current_ir_graph;
3068 assert(get_irg_phase_state(irg) == phase_building);
3069 assert(pos+1 < irg->n_loc);
3070 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3073 /* Find the value number for a node in the current block.*/
3075 find_value(ir_node *value) {
3077 ir_node *bl = current_ir_graph->current_block;
3079 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3080 if (bl->attr.block.graph_arr[i] == value)
3085 /* get the current store */
3088 ir_graph *irg = current_ir_graph;
3090 assert(get_irg_phase_state(irg) == phase_building);
3091 /* GL: one could call get_value instead */
3092 inc_irg_visited(irg);
3093 return get_r_value_internal(irg->current_block, 0, mode_M);
3096 /* set the current store: handles automatic Sync construction for Load nodes */
3098 set_store(ir_node *store)
3100 ir_node *load, *pload, *pred, *in[2];
3102 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3103 /* Beware: due to dead code elimination, a store might become a Bad node even in
3104 the construction phase. */
3105 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3107 if (get_opt_auto_create_sync()) {
3108 /* handle non-volatile Load nodes by automatically creating Sync's */
3109 load = skip_Proj(store);
3110 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3111 pred = get_Load_mem(load);
3113 if (is_Sync(pred)) {
3114 /* a Load after a Sync: move it up */
3115 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3117 set_Load_mem(load, get_memop_mem(mem));
3118 add_Sync_pred(pred, store);
3122 pload = skip_Proj(pred);
3123 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3124 /* a Load after a Load: create a new Sync */
3125 set_Load_mem(load, get_Load_mem(pload));
3129 store = new_Sync(2, in);
3134 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3138 keep_alive(ir_node *ka) {
3139 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3142 /* --- Useful access routines --- */
3143 /* Returns the current block of the current graph. To set the current
3144 block use set_cur_block. */
3145 ir_node *get_cur_block(void) {
3146 return get_irg_current_block(current_ir_graph);
3147 } /* get_cur_block */
3149 /* Returns the frame type of the current graph */
3150 ir_type *get_cur_frame_type(void) {
3151 return get_irg_frame_type(current_ir_graph);
3152 } /* get_cur_frame_type */
3155 /* ********************************************************************* */
3158 /* call once for each run of the library */
3160 init_cons(uninitialized_local_variable_func_t *func) {
3161 default_initialize_local_variable = func;
3165 irp_finalize_cons(void) {
3167 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3168 irg_finalize_cons(get_irp_irg(i));
3170 irp->phase_state = phase_high;
3171 } /* irp_finalize_cons */
3174 ir_node *new_Block(int arity, ir_node **in) {
3175 return new_d_Block(NULL, arity, in);
3177 ir_node *new_Start (void) {
3178 return new_d_Start(NULL);
3180 ir_node *new_End (void) {
3181 return new_d_End(NULL);
3183 ir_node *new_Jmp (void) {
3184 return new_d_Jmp(NULL);
3186 ir_node *new_IJmp (ir_node *tgt) {
3187 return new_d_IJmp(NULL, tgt);
3189 ir_node *new_Cond (ir_node *c) {
3190 return new_d_Cond(NULL, c);
3192 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3193 return new_d_Return(NULL, store, arity, in);
3195 ir_node *new_Const (ir_mode *mode, tarval *con) {
3196 return new_d_Const(NULL, mode, con);
3199 ir_node *new_Const_long(ir_mode *mode, long value)
3201 return new_d_Const_long(NULL, mode, value);
3204 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3205 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3208 ir_node *new_SymConst_type (symconst_symbol value, symconst_kind kind, ir_type *type) {
3209 return new_d_SymConst_type(NULL, value, kind, type);
3211 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3212 return new_d_SymConst(NULL, value, kind);
3214 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3215 return new_d_simpleSel(NULL, store, objptr, ent);
3217 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3219 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3221 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3223 return new_d_Call(NULL, store, callee, arity, in, tp);
3225 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3226 return new_d_Add(NULL, op1, op2, mode);
3228 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3229 return new_d_Sub(NULL, op1, op2, mode);
3231 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3232 return new_d_Minus(NULL, op, mode);
3234 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3235 return new_d_Mul(NULL, op1, op2, mode);
3237 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3238 return new_d_Quot(NULL, memop, op1, op2);
3240 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3241 return new_d_DivMod(NULL, memop, op1, op2);
3243 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3244 return new_d_Div(NULL, memop, op1, op2);
3246 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3247 return new_d_Mod(NULL, memop, op1, op2);
3249 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3250 return new_d_Abs(NULL, op, mode);
3252 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3253 return new_d_And(NULL, op1, op2, mode);
3255 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3256 return new_d_Or(NULL, op1, op2, mode);
3258 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3259 return new_d_Eor(NULL, op1, op2, mode);
3261 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3262 return new_d_Not(NULL, op, mode);
3264 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3265 return new_d_Shl(NULL, op, k, mode);
3267 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3268 return new_d_Shr(NULL, op, k, mode);
3270 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3271 return new_d_Shrs(NULL, op, k, mode);
3273 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3274 return new_d_Rot(NULL, op, k, mode);
3276 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3277 return new_d_Carry(NULL, op1, op2, mode);
3279 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3280 return new_d_Borrow(NULL, op1, op2, mode);
3282 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3283 return new_d_Cmp(NULL, op1, op2);
3285 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3286 return new_d_Conv(NULL, op, mode);
3288 ir_node *new_strictConv (ir_node *op, ir_mode *mode) {
3289 return new_d_strictConv(NULL, op, mode);
3291 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3292 return new_d_Cast(NULL, op, to_tp);
3294 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3295 return new_d_Phi(NULL, arity, in, mode);
3297 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3298 return new_d_Load(NULL, store, addr, mode);
3300 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3301 return new_d_Store(NULL, store, addr, val);
3303 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3304 where_alloc where) {
3305 return new_d_Alloc(NULL, store, size, alloc_type, where);
3307 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3308 ir_type *free_type, where_alloc where) {
3309 return new_d_Free(NULL, store, ptr, size, free_type, where);
3311 ir_node *new_Sync (int arity, ir_node *in[]) {
3312 return new_d_Sync(NULL, arity, in);
3314 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3315 return new_d_Proj(NULL, arg, mode, proj);
3317 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3318 return new_d_defaultProj(NULL, arg, max_proj);
3320 ir_node *new_Tuple (int arity, ir_node **in) {
3321 return new_d_Tuple(NULL, arity, in);
3323 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3324 return new_d_Id(NULL, val, mode);
3326 ir_node *new_Bad (void) {
3329 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3330 return new_d_Confirm (NULL, val, bound, cmp);
3332 ir_node *new_Unknown(ir_mode *m) {
3333 return new_d_Unknown(m);
3335 ir_node *new_CallBegin (ir_node *callee) {
3336 return new_d_CallBegin(NULL, callee);
3338 ir_node *new_EndReg (void) {
3339 return new_d_EndReg(NULL);
3341 ir_node *new_EndExcept (void) {
3342 return new_d_EndExcept(NULL);
3344 ir_node *new_Break (void) {
3345 return new_d_Break(NULL);
3347 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3348 return new_d_Filter(NULL, arg, mode, proj);
3350 ir_node *new_NoMem (void) {
3351 return new_d_NoMem();
3353 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3354 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3356 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3357 return new_d_Psi(NULL, arity, conds, vals, mode);
3359 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3360 return new_d_CopyB(NULL, store, dst, src, data_type);
3362 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3363 return new_d_InstOf (NULL, store, objptr, ent);
3365 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3366 return new_d_Raise(NULL, store, obj);
3368 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3369 return new_d_Bound(NULL, store, idx, lower, upper);
3371 ir_node *new_Pin(ir_node *node) {
3372 return new_d_Pin(NULL, node);