2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
/* when we need verifying */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
#endif /* NDEBUG */
54 * Language dependent variable initialization callback.
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* creates a bd constructor for a binop */
#define NEW_BD_BINOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
               ir_node *op1, ir_node *op2, ir_mode *mode)         \
{                                                                 \
	ir_node  *in[2];                                              \
	ir_node  *res;                                                \
	ir_graph *irg = current_ir_graph;                             \
	in[0] = op1;                                                  \
	in[1] = op2;                                                  \
	res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);   \
	res = optimize_node(res);                                     \
	IRN_VRFY_IRG(res, irg);                                       \
	return res;                                                   \
}

/* creates a bd constructor for an unop */
#define NEW_BD_UNOP(instr)                                        \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
               ir_node *op, ir_mode *mode)                        \
{                                                                 \
	ir_node  *res;                                                \
	ir_graph *irg = current_ir_graph;                             \
	res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);  \
	res = optimize_node(res);                                     \
	IRN_VRFY_IRG(res, irg);                                       \
	return res;                                                   \
}

/* creates a bd constructor for an divop */
#define NEW_BD_DIVOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
               ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                                 \
	ir_node  *in[3];                                              \
	ir_node  *res;                                                \
	ir_graph *irg = current_ir_graph;                             \
	in[0] = memop;                                                \
	in[1] = op1;                                                  \
	in[2] = op2;                                                  \
	res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
	res->attr.divmod.exc.pin_state = state;                       \
	res->attr.divmod.resmode       = mode;                        \
	res->attr.divmod.no_remainder  = 0;                           \
	res = optimize_node(res);                                     \
	IRN_VRFY_IRG(res, irg);                                       \
	return res;                                                   \
}
/* creates a rd constructor for a binop */
#define NEW_RD_BINOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
               ir_node *op1, ir_node *op2, ir_mode *mode)         \
{                                                                 \
	ir_node  *res;                                                \
	ir_graph *rem = current_ir_graph;                             \
	current_ir_graph = irg;                                       \
	res = new_bd_##instr(db, block, op1, op2, mode);              \
	current_ir_graph = rem;                                       \
	return res;                                                   \
}

/* creates a rd constructor for an unop */
#define NEW_RD_UNOP(instr)                                        \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
               ir_node *op, ir_mode *mode)                        \
{                                                                 \
	ir_node  *res;                                                \
	ir_graph *rem = current_ir_graph;                             \
	current_ir_graph = irg;                                       \
	res = new_bd_##instr(db, block, op, mode);                    \
	current_ir_graph = rem;                                       \
	return res;                                                   \
}

/* creates a rd constructor for an divop */
#define NEW_RD_DIVOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
               ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                                 \
	ir_node  *res;                                                \
	ir_graph *rem = current_ir_graph;                             \
	current_ir_graph = irg;                                       \
	res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
	current_ir_graph = rem;                                       \
	return res;                                                   \
}
/* creates a d constructor for an binop */
#define NEW_D_BINOP(instr)                                                      \
ir_node *                                                                       \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {        \
	return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
}

/* creates a d constructor for an unop */
#define NEW_D_UNOP(instr)                                                       \
ir_node *                                                                       \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {                       \
	return new_bd_##instr(db, current_ir_graph->current_block, op, mode);       \
}
166 #include "gen_ir_cons.c.inl"
168 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
171 ir_graph *irg = current_ir_graph;
173 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175 IRN_VRFY_IRG(res, irg);
179 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186 IRN_VRFY_IRG(res, irg);
191 * Creates a Phi node with all predecessors. Calling this constructor
192 * is only allowed if the corresponding block is mature.
194 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
197 ir_graph *irg = current_ir_graph;
201 /* Don't assert that block matured: the use of this constructor is strongly
203 if (get_Block_matured(block))
204 assert(get_irn_arity(block) == arity);
206 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
208 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
210 for (i = arity - 1; i >= 0; --i)
211 if (is_Unknown(in[i])) {
216 if (!has_unknown) res = optimize_node(res);
217 IRN_VRFY_IRG(res, irg);
219 /* Memory Phis in endless loops must be kept alive.
220 As we can't distinguish these easily we keep all of them alive. */
221 if (is_Phi(res) && mode == mode_M)
222 add_End_keepalive(get_irg_end(irg), res);
226 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
229 ir_graph *irg = current_ir_graph;
231 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232 res->attr.con.tv = con;
233 set_Const_type(res, tp); /* Call method because of complex assertion. */
234 res = optimize_node (res);
235 assert(get_Const_type(res) == tp);
236 IRN_VRFY_IRG(res, irg);
239 } /* new_bd_Const_type */
241 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
243 ir_graph *irg = current_ir_graph;
245 return new_rd_Const_type(db, irg, con, firm_unknown_type);
248 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
250 ir_graph *irg = current_ir_graph;
252 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 } /* new_bd_Const_long */
255 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
260 assert(arg->op == op_Cond);
261 arg->attr.cond.default_proj = max_proj;
262 res = new_rd_Proj(db, block, arg, mode_X, max_proj);
264 } /* new_bd_defaultProj */
266 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *objptr, int arity, ir_node **in,
273 ir_graph *irg = current_ir_graph;
274 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
282 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 * Sel's can select functions which should be of mode mode_P_code.
286 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 res->attr.sel.entity = ent;
288 res = optimize_node(res);
289 IRN_VRFY_IRG(res, irg);
293 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
294 ir_mode *mode, symconst_symbol value,
295 symconst_kind symkind, ir_type *tp)
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
309 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
321 static ir_node *new_bd_EndReg(dbg_info *db, ir_node *block)
324 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
327 set_irg_end_reg(irg, res);
328 IRN_VRFY_IRG(res, irg);
330 } /* new_bd_EndReg */
332 static ir_node *new_bd_EndExcept(dbg_info *db, ir_node *block)
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
338 set_irg_end_except(irg, res);
339 IRN_VRFY_IRG (res, irg);
341 } /* new_bd_EndExcept */
343 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
344 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
345 ir_asm_constraint *outputs, int n_clobber,
346 ident *clobber[], ident *asm_text)
349 ir_graph *irg = current_ir_graph;
351 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
352 res->attr.assem.pin_state = op_pin_state_pinned;
353 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
354 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
355 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
356 res->attr.assem.asm_text = asm_text;
358 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
359 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
360 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
367 /* --------------------------------------------- */
368 /* private interfaces, for professional use only */
369 /* --------------------------------------------- */
371 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
373 ir_graph *rem = current_ir_graph;
376 current_ir_graph = irg;
377 res = new_bd_Start(db, block);
378 current_ir_graph = rem;
383 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
386 ir_graph *rem = current_ir_graph;
388 current_ir_graph = irg;
389 res = new_bd_End(db, block);
390 current_ir_graph = rem;
395 /* Creates a Phi node with all predecessors. Calling this constructor
396 is only allowed if the corresponding block is mature. */
397 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
400 ir_graph *rem = current_ir_graph;
402 current_ir_graph = get_Block_irg(block);
403 res = new_bd_Phi(db, block,arity, in, mode);
404 current_ir_graph = rem;
409 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
412 ir_graph *rem = current_ir_graph;
414 current_ir_graph = irg;
415 res = new_bd_Const_type(db, con, tp);
416 current_ir_graph = rem;
419 } /* new_rd_Const_type */
421 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
424 //#ifdef USE_ORIGINAL
425 ir_graph *rem = current_ir_graph;
427 current_ir_graph = irg;
428 res = new_bd_Const_type(db, con, firm_unknown_type);
429 current_ir_graph = rem;
431 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
437 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
439 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
440 } /* new_rd_Const_long */
442 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj)
445 ir_graph *rem = current_ir_graph;
447 current_ir_graph = get_Block_irg(block);
448 res = new_bd_defaultProj(db, block, arg, max_proj);
449 current_ir_graph = rem;
452 } /* new_rd_defaultProj */
454 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
455 ir_node *objptr, ir_entity *ent)
458 ir_graph *rem = current_ir_graph;
460 current_ir_graph = get_Block_irg(block);
461 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
462 current_ir_graph = rem;
465 } /* new_rd_simpleSel */
467 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
468 symconst_symbol value, symconst_kind symkind,
472 ir_graph *rem = current_ir_graph;
473 ir_node *block = get_irg_start_block(irg);
475 current_ir_graph = irg;
476 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
477 current_ir_graph = rem;
480 } /* new_rd_SymConst_type */
482 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
483 symconst_symbol value, symconst_kind symkind)
485 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
486 } /* new_rd_SymConst */
488 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
491 sym.entity_p = symbol;
492 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
493 } /* new_rd_SymConst_addr_ent */
495 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
498 sym.entity_p = symbol;
499 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
500 } /* new_rd_SymConst_ofs_ent */
502 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
506 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
507 } /* new_rd_SymConst_type_tag */
509 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
513 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
514 } /* new_rd_SymConst_size */
516 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
520 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
521 } /* new_rd_SymConst_align */
523 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
526 ir_graph *rem = current_ir_graph;
529 current_ir_graph = get_Block_irg(block);
530 res = new_bd_Sync(db, block);
531 current_ir_graph = rem;
533 for (i = 0; i < arity; ++i)
534 add_Sync_pred(res, in[i]);
539 ir_node *new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
543 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
544 set_irg_end_reg(irg, res);
545 IRN_VRFY_IRG(res, irg);
547 } /* new_rd_EndReg */
549 ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
553 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
554 set_irg_end_except(irg, res);
555 IRN_VRFY_IRG (res, irg);
557 } /* new_rd_EndExcept */
559 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
560 int arity, ir_node *in[], ir_asm_constraint *inputs,
561 int n_outs, ir_asm_constraint *outputs,
562 int n_clobber, ident *clobber[], ident *asm_text)
565 ir_graph *rem = current_ir_graph;
567 current_ir_graph = get_Block_irg(block);
568 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
569 current_ir_graph = rem;
574 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
576 return new_rd_Start(NULL, irg, block);
578 ir_node *new_r_End(ir_graph *irg, ir_node *block)
580 return new_rd_End(NULL, irg, block);
582 ir_node *new_r_Const(ir_graph *irg, tarval *con)
584 return new_rd_Const(NULL, irg, con);
586 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
588 return new_rd_Const_long(NULL, irg, mode, value);
590 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
592 return new_rd_Const_type(NULL, irg, con, tp);
594 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
595 symconst_kind symkind)
597 return new_rd_SymConst(NULL, irg, mode, value, symkind);
599 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
602 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
604 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
606 return new_rd_Phi(NULL, block, arity, in, mode);
608 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
610 return new_rd_Sync(NULL, block, arity, in);
612 ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj)
614 return new_rd_defaultProj(NULL, block, arg, max_proj);
616 ir_node *new_r_Bad(ir_graph *irg)
618 return get_irg_bad(irg);
620 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
622 return new_rd_EndReg(NULL, irg, block);
624 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
626 return new_rd_EndExcept(NULL, irg, block);
628 ir_node *new_r_NoMem(ir_graph *irg)
630 return get_irg_no_mem(irg);
632 ir_node *new_r_ASM(ir_node *block,
633 int arity, ir_node *in[], ir_asm_constraint *inputs,
634 int n_outs, ir_asm_constraint *outputs,
635 int n_clobber, ident *clobber[], ident *asm_text)
637 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
640 /** ********************/
641 /** public interfaces */
642 /** construction tools */
644 ir_node *new_d_Start(dbg_info *db)
648 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
649 op_Start, mode_T, 0, NULL);
651 res = optimize_node(res);
652 IRN_VRFY_IRG(res, current_ir_graph);
656 ir_node *new_d_End(dbg_info *db)
659 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
660 op_End, mode_X, -1, NULL);
661 res = optimize_node(res);
662 IRN_VRFY_IRG(res, current_ir_graph);
667 /* ***********************************************************************/
668 /* Methods necessary for automatic Phi node creation */
670 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
671 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
672 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
673 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
675 Call Graph: ( A ---> B == A "calls" B)
677 get_value mature_immBlock
685 get_r_value_internal |
689 new_rd_Phi0 new_rd_Phi_in
691 * *************************************************************************** */
693 /** Creates a Phi node with 0 predecessors. */
694 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
698 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
699 IRN_VRFY_IRG(res, irg);
705 * Internal constructor of a Phi node by a phi_merge operation.
707 * @param irg the graph on which the Phi will be constructed
708 * @param block the block in which the Phi will be constructed
709 * @param mode the mod eof the Phi node
710 * @param in the input array of the phi node
711 * @param ins number of elements in the input array
712 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
713 * the value for which the new Phi is constructed
715 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
716 ir_mode *mode, ir_node **in, int ins,
720 ir_node *res, *known;
722 /* Allocate a new node on the obstack. The allocation copies the in
724 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
725 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
727 /* This loop checks whether the Phi has more than one predecessor.
728 If so, it is a real Phi node and we break the loop. Else the
729 Phi node merges the same definition on several paths and therefore
731 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
733 for (i = ins - 1; i >= 0; --i) {
736 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
738 /* Optimize self referencing Phis: We can't detect them yet properly, as
739 they still refer to the Phi0 they will replace. So replace right now. */
740 if (phi0 && in[i] == phi0)
743 if (in[i] == res || in[i] == known)
752 /* i < 0: there is at most one predecessor, we don't need a phi node. */
755 edges_node_deleted(res, current_ir_graph);
756 obstack_free(current_ir_graph->obst, res);
758 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
759 order, an enclosing Phi know may get superfluous. */
760 res = optimize_in_place_2(known);
762 exchange(known, res);
767 /* A undefined value, e.g., in unreachable code. */
771 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
772 IRN_VRFY_IRG(res, irg);
773 /* Memory Phis in endless loops must be kept alive.
774 As we can't distinguish these easily we keep all of them alive. */
775 if (is_Phi(res) && mode == mode_M)
776 add_End_keepalive(get_irg_end(irg), res);
780 } /* new_rd_Phi_in */
782 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
784 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
787 * Construct a new frag_array for node n.
788 * Copy the content from the current graph_arr of the corresponding block:
789 * this is the current state.
790 * Set ProjM(n) as current memory state.
791 * Further the last entry in frag_arr of current block points to n. This
792 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
794 static inline ir_node **new_frag_arr(ir_node *n)
799 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
800 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
801 sizeof(ir_node *)*current_ir_graph->n_loc);
803 /* turn off optimization before allocating Proj nodes, as res isn't
805 opt = get_opt_optimize(); set_optimize(0);
806 /* Here we rely on the fact that all frag ops have Memory as first result! */
808 arr[0] = new_Proj(n, mode_M, pn_Call_M);
809 } else if (is_CopyB(n)) {
810 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
812 assert((pn_Quot_M == pn_DivMod_M) &&
813 (pn_Quot_M == pn_Div_M) &&
814 (pn_Quot_M == pn_Mod_M) &&
815 (pn_Quot_M == pn_Load_M) &&
816 (pn_Quot_M == pn_Store_M) &&
817 (pn_Quot_M == pn_Alloc_M) &&
818 (pn_Quot_M == pn_Bound_M));
819 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
823 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
828 * Returns the frag_arr from a node.
830 static inline ir_node **get_frag_arr(ir_node *n)
832 switch (get_irn_opcode(n)) {
834 return n->attr.call.exc.frag_arr;
836 return n->attr.alloc.exc.frag_arr;
838 return n->attr.load.exc.frag_arr;
840 return n->attr.store.exc.frag_arr;
842 return n->attr.except.frag_arr;
846 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
851 for (i = 1024; i >= 0; --i)
856 if (frag_arr[pos] == NULL)
858 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
859 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
860 assert(arr != frag_arr && "Endless recursion detected");
865 assert(!"potential endless recursion in set_frag_value");
866 } /* set_frag_value */
868 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
869 int pos, ir_mode *mode)
874 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
876 frag_arr = get_frag_arr(cfOp);
879 if (block->attr.block.graph_arr[pos] != NULL) {
880 /* There was a set_value() after the cfOp and no get_value() before that
881 set_value(). We must build a Phi node now. */
882 if (block->attr.block.is_matured) {
883 int ins = get_irn_arity(block);
885 NEW_ARR_A(ir_node *, nin, ins);
886 res = phi_merge(block, pos, mode, nin, ins);
888 res = new_rd_Phi0(current_ir_graph, block, mode);
889 res->attr.phi.u.pos = pos;
890 res->attr.phi.next = block->attr.block.phis;
891 block->attr.block.phis = res;
894 /* It's a Phi, we can write this into all graph_arrs with NULL */
895 set_frag_value(block->attr.block.graph_arr, pos, res);
897 res = get_r_value_internal(block, pos, mode);
898 set_frag_value(block->attr.block.graph_arr, pos, res);
902 } /* get_r_frag_value_internal */
905 * Check whether a control flownode cf_pred represents an exception flow.
907 * @param cf_pred the control flow node
908 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
910 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
913 * Note: all projections from a raise are "exceptional control flow" we we handle it
914 * like a normal Jmp, because there is no "regular" one.
915 * That's why Raise is no "fragile_op"!
917 if (is_fragile_op(prev_cf_op)) {
918 if (is_Proj(cf_pred)) {
919 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
920 /* the regular control flow, NO exception */
923 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
926 /* Hmm, exception but not a Proj? */
927 assert(!"unexpected condition: fragile op without a proj");
931 } /* is_exception_flow */
934 * Computes the predecessors for the real phi node, and then
935 * allocates and returns this node. The routine called to allocate the
936 * node might optimize it away and return a real value.
937 * This function must be called with an in-array of proper size.
939 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
941 ir_node *prevBlock, *res, *phi0, *phi0_all;
944 /* If this block has no value at pos create a Phi0 and remember it
945 in graph_arr to break recursions.
946 Else we may not set graph_arr as there a later value is remembered. */
948 if (block->attr.block.graph_arr[pos] == NULL) {
949 ir_graph *irg = current_ir_graph;
951 if (block == get_irg_start_block(irg)) {
952 /* Collapsing to Bad tarvals is no good idea.
953 So we call a user-supplied routine here that deals with this case as
954 appropriate for the given language. Sorrily the only help we can give
955 here is the position.
957 Even if all variables are defined before use, it can happen that
958 we get to the start block, if a Cond has been replaced by a tuple
959 (bad, jmp). In this case we call the function needlessly, eventually
960 generating an non existent error.
961 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
964 if (default_initialize_local_variable != NULL) {
965 ir_node *rem = get_cur_block();
967 set_cur_block(block);
968 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
972 block->attr.block.graph_arr[pos] = new_Unknown(mode);
973 /* We don't need to care about exception ops in the start block.
974 There are none by definition. */
975 return block->attr.block.graph_arr[pos];
977 phi0 = new_rd_Phi0(irg, block, mode);
978 block->attr.block.graph_arr[pos] = phi0;
979 if (get_opt_precise_exc_context()) {
980 /* Set graph_arr for fragile ops. Also here we should break recursion.
981 We could choose a cyclic path through an cfop. But the recursion would
982 break at some point. */
983 set_frag_value(block->attr.block.graph_arr, pos, phi0);
988 /* This loop goes to all predecessor blocks of the block the Phi node
989 is in and there finds the operands of the Phi node by calling
990 get_r_value_internal. */
991 for (i = 1; i <= ins; ++i) {
992 ir_node *cf_pred = block->in[i];
993 ir_node *prevCfOp = skip_Proj(cf_pred);
995 if (is_Bad(prevCfOp)) {
996 /* In case a Cond has been optimized we would get right to the start block
997 with an invalid definition. */
998 nin[i-1] = new_Bad();
1001 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
1003 if (!is_Bad(prevBlock)) {
1004 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
1005 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
1006 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
1008 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
1010 nin[i-1] = new_Bad();
1014 /* We want to pass the Phi0 node to the constructor: this finds additional
1015 optimization possibilities.
1016 The Phi0 node either is allocated in this function, or it comes from
1017 a former call to get_r_value_internal(). In this case we may not yet
1018 exchange phi0, as this is done in mature_immBlock(). */
1020 phi0_all = block->attr.block.graph_arr[pos];
1021 if (! is_Phi0(phi0_all) ||
1022 get_irn_arity(phi0_all) != 0 ||
1023 get_nodes_block(phi0_all) != block)
1029 /* After collecting all predecessors into the array nin a new Phi node
1030 with these predecessors is created. This constructor contains an
1031 optimization: If all predecessors of the Phi node are identical it
1032 returns the only operand instead of a new Phi node. */
1033 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1035 /* In case we allocated a Phi0 node at the beginning of this procedure,
1036 we need to exchange this Phi0 with the real Phi. */
1038 exchange(phi0, res);
1039 block->attr.block.graph_arr[pos] = res;
1040 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1041 only an optimization. */
1048 * This function returns the last definition of a value. In case
1049 * this value was last defined in a previous block, Phi nodes are
1050 * inserted. If the part of the firm graph containing the definition
1051 * is not yet constructed, a dummy Phi node is returned.
1053 * @param block the current block
1054 * @param pos the value number of the value searched
1055 * @param mode the mode of this value (needed for Phi construction)
1057 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1060 /* There are 4 cases to treat.
1062 1. The block is not mature and we visit it the first time. We can not
1063 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1064 predecessors is returned. This node is added to the linked list (block
1065 attribute "phis") of the containing block to be completed when this block is
1066 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1069 2. The value is already known in this block, graph_arr[pos] is set and we
1070 visit the block the first time. We can return the value without
1071 creating any new nodes.
1073 3. The block is mature and we visit it the first time. A Phi node needs
1074 to be created (phi_merge). If the Phi is not needed, as all it's
1075 operands are the same value reaching the block through different
1076 paths, it's optimized away and the value itself is returned.
1078 4. The block is mature, and we visit it the second time. Now two
1079 subcases are possible:
1080 * The value was computed completely the last time we were here. This
1081 is the case if there is no loop. We can return the proper value.
1082 * The recursion that visited this node and set the flag did not
1083 return yet. We are computing a value in a loop and need to
1084 break the recursion. This case only happens if we visited
1085 the same block with phi_merge before, which inserted a Phi0.
1086 So we return the Phi0.
1089 /* case 4 -- already visited. */
1090 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1091 /* As phi_merge allocates a Phi0 this value is always defined. Here
1092 is the critical difference of the two algorithms. */
1093 assert(block->attr.block.graph_arr[pos]);
1094 return block->attr.block.graph_arr[pos];
1097 /* visited the first time */
1098 set_irn_visited(block, get_irg_visited(current_ir_graph));
1100 /* Get the local valid value */
1101 res = block->attr.block.graph_arr[pos];
1103 /* case 2 -- If the value is actually computed, return it. */
1107 if (block->attr.block.is_matured) { /* case 3 */
1109 /* The Phi has the same amount of ins as the corresponding block. */
1110 int ins = get_irn_arity(block);
1112 NEW_ARR_A(ir_node *, nin, ins);
1114 /* Phi merge collects the predecessors and then creates a node. */
1115 res = phi_merge(block, pos, mode, nin, ins);
1117 } else { /* case 1 */
1118 /* The block is not mature, we don't know how many in's are needed. A Phi
1119 with zero predecessors is created. Such a Phi node is called Phi0
1120 node. The Phi0 is then added to the list of Phi0 nodes in this block
1121 to be matured by mature_immBlock later.
1122 The Phi0 has to remember the pos of it's internal value. If the real
1123 Phi is computed, pos is used to update the array with the local
1125 res = new_rd_Phi0(current_ir_graph, block, mode);
1126 res->attr.phi.u.pos = pos;
1127 res->attr.phi.next = block->attr.block.phis;
1128 block->attr.block.phis = res;
1131 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1133 /* The local valid value is available now. */
1134 block->attr.block.graph_arr[pos] = res;
1137 } /* get_r_value_internal */
1139 /* ************************************************************************** */
1142 * Finalize a Block node, when all control flows are known.
1143 * Acceptable parameters are only Block nodes.
1145 void mature_immBlock(ir_node *block)
1151 assert(is_Block(block));
1152 if (!get_Block_matured(block)) {
1153 ir_graph *irg = current_ir_graph;
1155 ins = ARR_LEN(block->in) - 1;
1156 /* Fix block parameters */
1157 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1159 /* An array for building the Phi nodes. */
1160 NEW_ARR_A(ir_node *, nin, ins);
1162 /* Traverse a chain of Phi nodes attached to this block and mature
1164 for (n = block->attr.block.phis; n; n = next) {
1165 inc_irg_visited(irg);
1166 next = n->attr.phi.next;
1167 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1170 block->attr.block.is_matured = 1;
1172 /* Now, as the block is a finished Firm node, we can optimize it.
1173 Since other nodes have been allocated since the block was created
1174 we can not free the node on the obstack. Therefore we have to call
1175 optimize_in_place().
1176 Unfortunately the optimization does not change a lot, as all allocated
1177 nodes refer to the unoptimized node.
1178 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1179 block = optimize_in_place_2(block);
1180 IRN_VRFY_IRG(block, irg);
1182 } /* mature_immBlock */
1184 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1186 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1189 ir_node *new_d_Const(dbg_info *db, tarval *con)
1191 return new_bd_Const(db, con);
1194 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1196 return new_bd_Const_long(db, mode, value);
1197 } /* new_d_Const_long */
1199 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1201 return new_bd_Const_type(db, con, tp);
1202 } /* new_d_Const_type */
1205 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1208 assert(arg->op == op_Cond);
1209 arg->attr.cond.default_proj = max_proj;
1210 res = new_d_Proj(db, arg, mode_X, max_proj);
1212 } /* new_d_defaultProj */
1215 * Allocate a frag array for a node if the current graph state is phase_building.
1217 * @param irn the node for which the frag array should be allocated
1218 * @param op the opcode of the (original) node, if does not match opcode of irn,
1220 * @param frag_store the address of the frag store in irn attributes, if this
1221 * address contains a value != NULL, does nothing
1223 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1225 if (get_opt_precise_exc_context()) {
1226 if ((current_ir_graph->phase_state == phase_building) &&
1227 (get_irn_op(irn) == op) && /* Could be optimized away. */
1228 !*frag_store) /* Could be a cse where the arr is already set. */ {
1229 *frag_store = new_frag_arr(irn);
1232 } /* firm_alloc_frag_arr */
1234 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1235 /* GL: objptr was called frame before. Frame was a bad choice for the name
1236 as the operand could as well be a pointer to a dynamic object. */
1238 return new_bd_Sel(db, current_ir_graph->current_block,
1239 store, objptr, 0, NULL, ent);
1240 } /* new_d_simpleSel */
1242 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1244 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1246 } /* new_d_SymConst_type */
1248 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1250 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1251 value, kind, firm_unknown_type);
1252 } /* new_d_SymConst */
1254 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1256 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1259 ir_node *new_d_EndReg(dbg_info *db)
1261 return new_bd_EndReg(db, current_ir_graph->current_block);
1262 } /* new_d_EndReg */
1264 ir_node *new_d_EndExcept(dbg_info *db)
1266 return new_bd_EndExcept(db, current_ir_graph->current_block);
1267 } /* new_d_EndExcept */
1270 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1271 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1272 ident *clobber[], ident *asm_text)
1274 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1277 /* ********************************************************************* */
1278 /* Comfortable interface with automatic Phi node construction. */
1279 /* (Uses also constructors of the debug-info interface, except new_Block.) */
1280 /* ********************************************************************* */
1282 /* Block construction */
1283 /* immature Block without predecessors */
1284 ir_node *new_d_immBlock(dbg_info *db)
1288 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1289 /* creates a new dynamic in-array as length of in is -1 */
1290 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1292 /* macroblock head */
1295 res->attr.block.is_matured = 0;
1296 res->attr.block.is_dead = 0;
1297 res->attr.block.is_mb_head = 1;
1298 res->attr.block.irg.irg = current_ir_graph;
1299 res->attr.block.backedge = NULL;
1300 res->attr.block.in_cg = NULL;
1301 res->attr.block.cg_backedge = NULL;
1302 res->attr.block.extblk = NULL;
1303 res->attr.block.region = NULL;
1304 res->attr.block.mb_depth = 0;
1305 res->attr.block.entity = NULL;
1307 set_Block_block_visited(res, 0);
1309 /* Create and initialize array for Phi-node construction. */
1310 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1311 current_ir_graph->n_loc);
1312 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1314 /* Immature block may not be optimized! */
1315 IRN_VRFY_IRG(res, current_ir_graph);
1318 } /* new_d_immBlock */
1320 ir_node *new_immBlock(void)
1322 return new_d_immBlock(NULL);
1323 } /* new_immBlock */
1325 /* immature PartBlock with its predecessors */
1326 ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
1328 ir_node *res = new_d_immBlock(db);
1329 ir_node *blk = get_nodes_block(pred_jmp);
1331 res->in[0] = blk->in[0];
1332 assert(res->in[0] != NULL);
1333 add_immBlock_pred(res, pred_jmp);
1335 res->attr.block.is_mb_head = 0;
1336 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1339 } /* new_d_immPartBlock */
1341 ir_node *new_immPartBlock(ir_node *pred_jmp)
1343 return new_d_immPartBlock(NULL, pred_jmp);
1344 } /* new_immPartBlock */
1346 /* add an edge to a jmp/control flow node */
1347 void add_immBlock_pred(ir_node *block, ir_node *jmp)
1349 int n = ARR_LEN(block->in) - 1;
1351 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1352 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1353 assert(is_ir_node(jmp));
1355 ARR_APP1(ir_node *, block->in, jmp);
1357 hook_set_irn_n(block, n, jmp, NULL);
1358 } /* add_immBlock_pred */
1360 /* changing the current block */
1361 void set_cur_block(ir_node *target)
1363 current_ir_graph->current_block = target;
1364 } /* set_cur_block */
1366 /* ************************ */
1367 /* parameter administration */
1369 /* get a value from the parameter array from the current block by its index */
1370 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
1372 ir_graph *irg = current_ir_graph;
1373 assert(get_irg_phase_state(irg) == phase_building);
1374 inc_irg_visited(irg);
1379 return get_r_value_internal(irg->current_block, pos + 1, mode);
1382 /* get a value from the parameter array from the current block by its index */
1383 ir_node *get_value(int pos, ir_mode *mode)
1385 return get_d_value(NULL, pos, mode);
1388 /* set a value at position pos in the parameter array from the current block */
1389 void set_value(int pos, ir_node *value)
1391 ir_graph *irg = current_ir_graph;
1392 assert(get_irg_phase_state(irg) == phase_building);
1394 assert(pos+1 < irg->n_loc);
1395 assert(is_ir_node(value));
1396 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1399 /* Find the value number for a node in the current block.*/
1400 int find_value(ir_node *value)
1403 ir_node *bl = current_ir_graph->current_block;
1405 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1406 if (bl->attr.block.graph_arr[i] == value)
1411 /* get the current store */
1412 ir_node *get_store(void)
1414 ir_graph *irg = current_ir_graph;
1416 assert(get_irg_phase_state(irg) == phase_building);
1417 /* GL: one could call get_value instead */
1418 inc_irg_visited(irg);
1419 return get_r_value_internal(irg->current_block, 0, mode_M);
1422 /* set the current store: handles automatic Sync construction for Load nodes */
1423 void set_store(ir_node *store)
1425 ir_node *load, *pload, *pred, *in[2];
1427 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1428 /* Beware: due to dead code elimination, a store might become a Bad node even in
1429 the construction phase. */
1430 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1432 if (get_opt_auto_create_sync()) {
1433 /* handle non-volatile Load nodes by automatically creating Sync's */
1434 load = skip_Proj(store);
1435 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1436 pred = get_Load_mem(load);
1438 if (is_Sync(pred)) {
1439 /* a Load after a Sync: move it up */
1440 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1442 set_Load_mem(load, get_memop_mem(mem));
1443 add_Sync_pred(pred, store);
1446 pload = skip_Proj(pred);
1447 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1448 /* a Load after a Load: create a new Sync */
1449 set_Load_mem(load, get_Load_mem(pload));
1453 store = new_Sync(2, in);
1458 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1461 void keep_alive(ir_node *ka)
1463 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1466 /* --- Useful access routines --- */
1467 /* Returns the current block of the current graph. To set the current
1468 block use set_cur_block. */
1469 ir_node *get_cur_block(void)
1471 return get_irg_current_block(current_ir_graph);
1472 } /* get_cur_block */
1474 /* Returns the frame type of the current graph */
1475 ir_type *get_cur_frame_type(void)
1477 return get_irg_frame_type(current_ir_graph);
1478 } /* get_cur_frame_type */
1481 /* ********************************************************************* */
1484 /* call once for each run of the library */
1485 void firm_init_cons(uninitialized_local_variable_func_t *func)
1487 default_initialize_local_variable = func;
1488 } /* firm_init_cons */
1490 void irp_finalize_cons(void)
1493 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1494 irg_finalize_cons(get_irp_irg(i));
1496 irp->phase_state = phase_high;
1497 } /* irp_finalize_cons */
1499 ir_node *new_Start(void)
1501 return new_d_Start(NULL);
1503 ir_node *new_End(void)
1505 return new_d_End(NULL);
1507 ir_node *new_Const(tarval *con)
1509 return new_d_Const(NULL, con);
1512 ir_node *new_Const_long(ir_mode *mode, long value)
1514 return new_d_Const_long(NULL, mode, value);
1517 ir_node *new_Const_type(tarval *con, ir_type *tp)
1519 return new_d_Const_type(NULL, con, tp);
1522 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1524 return new_d_SymConst_type(NULL, mode, value, kind, type);
1526 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1528 return new_d_SymConst(NULL, mode, value, kind);
1530 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1532 return new_d_simpleSel(NULL, store, objptr, ent);
1534 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1536 return new_d_Phi(NULL, arity, in, mode);
1538 ir_node *new_Sync(int arity, ir_node *in[])
1540 return new_d_Sync(NULL, arity, in);
1542 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1544 return new_d_defaultProj(NULL, arg, max_proj);
1546 ir_node *new_Bad(void)
1548 return get_irg_bad(current_ir_graph);
1550 ir_node *new_EndReg(void)
1552 return new_d_EndReg(NULL);
1554 ir_node *new_EndExcept(void)
1556 return new_d_EndExcept(NULL);
1558 ir_node *new_NoMem(void)
1560 return get_irg_no_mem(current_ir_graph);
1562 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1563 int n_outs, ir_asm_constraint *outputs,
1564 int n_clobber, ident *clobber[], ident *asm_text)
1566 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1569 /* create a new anchor node */
1570 ir_node *new_Anchor(ir_graph *irg)
1572 ir_node *in[anchor_last];
1573 memset(in, 0, sizeof(in));
1574 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);