2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
46 /* when we need verifying */
/* NOTE(review): this listing is elided -- the #ifdef/#else/#endif lines that
   select between the two IRN_VRFY_IRG definitions below are not visible here.
   One variant is a no-op, the other runs the node verifier. */
48 # define IRN_VRFY_IRG(res, irg)
/* debug variant: verify each freshly constructed node against its graph */
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
/* Invoked when an uninitialized local variable is read in the start block;
   NULL means the default (an Unknown node) is used -- see phi_merge(). */
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
58 /* creates a bd constructor for a binop */
/* NOTE(review): elided listing -- each macro body below is missing lines
   (the "in" array setup, result declaration, opening/closing braces and the
   final return).  The visible pattern: allocate the node in
   current_ir_graph, run optimize_node(), verify, return. */
59 #define NEW_BD_BINOP(instr) \
61 new_bd_##instr(dbg_info *db, ir_node *block, \
62 ir_node *op1, ir_node *op2, ir_mode *mode) \
66 ir_graph *irg = current_ir_graph; \
69 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
70 res = optimize_node(res); \
71 IRN_VRFY_IRG(res, irg); \
75 /* creates a bd constructor for an unop */
76 #define NEW_BD_UNOP(instr) \
78 new_bd_##instr(dbg_info *db, ir_node *block, \
79 ir_node *op, ir_mode *mode) \
82 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an divop -- div-like ops produce a mode_T
   tuple (memory + results); pin state and result mode are stored in the
   divmod attribute before optimization */
90 #define NEW_BD_DIVOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
97 ir_graph *irg = current_ir_graph; \
101 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
102 res->attr.divmod.exc.pin_state = state; \
103 res->attr.divmod.resmode = mode; \
104 res->attr.divmod.no_remainder = 0; \
105 res = optimize_node(res); \
106 IRN_VRFY_IRG(res, irg); \
110 /* creates a rd constructor for a binop */
/* The new_rd_* constructor macros temporarily switch current_ir_graph to the
   explicitly passed irg, delegate to the matching new_bd_* constructor, then
   restore the previous graph.  NOTE(review): elided listing -- function
   headers, braces and returns are missing between the visible lines. */
111 #define NEW_RD_BINOP(instr) \
113 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
114 ir_node *op1, ir_node *op2, ir_mode *mode) \
117 ir_graph *rem = current_ir_graph; \
118 current_ir_graph = irg; \
119 res = new_bd_##instr(db, block, op1, op2, mode); \
120 current_ir_graph = rem; \
124 /* creates a rd constructor for an unop */
125 #define NEW_RD_UNOP(instr) \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
128 ir_node *op, ir_mode *mode) \
131 ir_graph *rem = current_ir_graph; \
132 current_ir_graph = irg; \
133 res = new_bd_##instr(db, block, op, mode); \
134 current_ir_graph = rem; \
138 /* creates a rd constructor for an divop */
139 #define NEW_RD_DIVOP(instr) \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
142 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
145 ir_graph *rem = current_ir_graph; \
146 current_ir_graph = irg; \
147 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
148 current_ir_graph = rem; \
152 /* creates a d constructor for an binop */
/* The new_d_* constructors build directly into current_ir_graph's
   current_block (the block under construction). */
153 #define NEW_D_BINOP(instr) \
155 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
156 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
159 /* creates a d constructor for an unop */
160 #define NEW_D_UNOP(instr) \
162 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
163 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
166 #include "gen_ir_cons.c.inl"
168 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
/* Creates the Start node (mode_T tuple of initial values) in the given block.
   NOTE(review): elided listing -- the res declaration, braces and return
   statements of both constructors are missing from this view. */
171 ir_graph *irg = current_ir_graph;
173 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175 IRN_VRFY_IRG(res, irg);
179 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
/* arity -1: the End node's in-array grows dynamically (keep-alive edges are
   appended later via add_End_keepalive()) -- TODO confirm against the
   new_ir_node() contract in the full source. */
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186 IRN_VRFY_IRG(res, irg);
191 * Creates a Phi node with all predecessors. Calling this constructor
192 * is only allowed if the corresponding block is mature.
194 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
197 ir_graph *irg = current_ir_graph;
/* NOTE(review): elided -- declarations of res, the loop index and the
   has_unknown flag, plus the loop body setting has_unknown, are missing. */
201 /* Don't assert that block matured: the use of this constructor is strongly
203 if (get_Block_matured(block))
204 assert(get_irn_arity(block) == arity);
206 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* per-input backedge flags, allocated on the graph's obstack */
208 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* scan the inputs: an Unknown operand suppresses optimization below */
210 for (i = arity - 1; i >= 0; --i)
211 if (is_Unknown(in[i])) {
216 if (!has_unknown) res = optimize_node(res);
217 IRN_VRFY_IRG(res, irg);
219 /* Memory Phis in endless loops must be kept alive.
220 As we can't distinguish these easily we keep all of them alive. */
221 if (is_Phi(res) && mode == mode_M)
222 add_End_keepalive(get_irg_end(irg), res);
226 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
/* Creates a Const carrying tarval con with attached type tp.  Constants are
   always placed in the start block of the graph. */
229 ir_graph *irg = current_ir_graph;
231 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232 res->attr.con.tv = con;
233 set_Const_type(res, tp); /* Call method because of complex assertion. */
234 res = optimize_node (res);
235 assert(get_Const_type(res) == tp);
236 IRN_VRFY_IRG(res, irg);
239 } /* new_bd_Const_type */
241 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
/* Convenience wrapper: a Const of unknown (untyped) type. */
243 ir_graph *irg = current_ir_graph;
245 return new_rd_Const_type(db, irg, con, firm_unknown_type);
248 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
/* Convenience wrapper: builds the tarval from a host long first. */
250 ir_graph *irg = current_ir_graph;
252 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 } /* new_bd_Const_long */
255 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
/* Builds the default Proj of a Cond: records max_proj as the Cond's
   default_proj attribute, then creates the mode_X projection itself.
   NOTE(review): elided -- the "long max_proj" parameter line and the res
   declaration/return are missing from this view. */
260 assert(arg->op == op_Cond);
261 arg->attr.cond.default_proj = max_proj;
262 res = new_rd_Proj(db, block, arg, mode_X, max_proj);
264 } /* new_bd_defaultProj */
266 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *objptr, int arity, ir_node **in,
/* NOTE(review): elided -- the ir_entity *ent parameter line and the r_arity
   computation (apparently arity + 2 for store and objptr, given the
   &r_in[2] copy below) are missing from this view -- confirm upstream. */
273 ir_graph *irg = current_ir_graph;
/* method entities select code pointers, everything else data pointers */
274 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
/* explicit index operands follow the first two (store/objptr) slots */
282 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 * Sel's can select functions which should be of mode mode_P_code.
286 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 res->attr.sel.entity = ent;
288 res = optimize_node(res);
289 IRN_VRFY_IRG(res, irg);
293 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
294 ir_mode *mode, symconst_symbol value,
295 symconst_kind symkind, ir_type *tp)
/* Creates a SymConst (symbolic constant) of the given kind in block. */
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
/* fill the symconst attribute: kind, symbol payload and attached type */
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
309 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
/* Creates an empty Sync (memory merge) node; predecessors are added
   afterwards via add_Sync_pred() -- see new_rd_Sync() below. */
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
321 static ir_node *new_bd_EndReg(dbg_info *db, ir_node *block)
/* End node for regular control flow; registered at the graph via
   set_irg_end_reg(). */
324 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
327 set_irg_end_reg(irg, res);
328 IRN_VRFY_IRG(res, irg);
330 } /* new_bd_EndReg */
332 static ir_node *new_bd_EndExcept(dbg_info *db, ir_node *block)
/* Exceptional counterpart of EndReg; registered via set_irg_end_except(). */
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
338 set_irg_end_except(irg, res);
339 IRN_VRFY_IRG (res, irg);
341 } /* new_bd_EndExcept */
343 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
344 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
345 ir_asm_constraint *outputs, int n_clobber,
346 ident *clobber[], ident *asm_text)
/* Creates an inline-assembler node with the given input/output constraints
   and clobber list. */
349 ir_graph *irg = current_ir_graph;
351 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
/* ASM nodes are constructed pinned (not movable by the scheduler) */
352 res->attr.assem.pin_state = op_pin_state_pinned;
/* copy the constraint/clobber arrays onto the graph's obstack so the node
   owns its own storage, independent of the caller's buffers */
353 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
354 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
355 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
356 res->attr.assem.asm_text = asm_text;
358 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
359 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
360 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
367 /* --------------------------------------------- */
368 /* private interfaces, for professional use only */
369 /* --------------------------------------------- */
371 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
/* Public rd constructors: switch current_ir_graph to the target graph,
   delegate to the bd variant, then restore the previous graph.
   NOTE(review): elided -- res declarations, braces and returns missing. */
373 ir_graph *rem = current_ir_graph;
376 current_ir_graph = irg;
377 res = new_bd_Start(db, block);
378 current_ir_graph = rem;
383 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
386 ir_graph *rem = current_ir_graph;
388 current_ir_graph = irg;
389 res = new_bd_End(db, block);
390 current_ir_graph = rem;
395 /* Creates a Phi node with all predecessors. Calling this constructor
396 is only allowed if the corresponding block is mature. */
397 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* here the graph is derived from the block rather than passed explicitly */
400 ir_graph *rem = current_ir_graph;
402 current_ir_graph = get_Block_irg(block);
403 res = new_bd_Phi(db, block,arity, in, mode);
404 current_ir_graph = rem;
409 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
412 ir_graph *rem = current_ir_graph;
414 current_ir_graph = irg;
415 res = new_bd_Const_type(db, con, tp);
416 current_ir_graph = rem;
419 } /* new_rd_Const_type */
421 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
/* NOTE(review): contains a disabled alternative implementation (the
   commented-out delegation to new_rd_Const_type below); the active path
   inlines the same logic with firm_unknown_type. */
424 //#ifdef USE_ORIGINAL
425 ir_graph *rem = current_ir_graph;
427 current_ir_graph = irg;
428 res = new_bd_Const_type(db, con, firm_unknown_type);
429 current_ir_graph = rem;
431 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
437 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
/* Convenience: builds the tarval from a host long first. */
439 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
440 } /* new_rd_Const_long */
442 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj)
445 ir_graph *rem = current_ir_graph;
447 current_ir_graph = get_Block_irg(block);
448 res = new_bd_defaultProj(db, block, arg, max_proj);
449 current_ir_graph = rem;
452 } /* new_rd_defaultProj */
454 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
455 ir_node *objptr, ir_entity *ent)
/* "simple" Sel: no explicit index operands (arity 0, in == NULL). */
458 ir_graph *rem = current_ir_graph;
460 current_ir_graph = get_Block_irg(block);
461 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
462 current_ir_graph = rem;
465 } /* new_rd_simpleSel */
467 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
468 symconst_symbol value, symconst_kind symkind,
/* SymConsts are always placed in the start block of the graph. */
472 ir_graph *rem = current_ir_graph;
473 ir_node *block = get_irg_start_block(irg);
475 current_ir_graph = irg;
476 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
477 current_ir_graph = rem;
480 } /* new_rd_SymConst_type */
482 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
483 symconst_symbol value, symconst_kind symkind)
/* Convenience: SymConst with unknown (untyped) type. */
485 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
486 } /* new_rd_SymConst */
488 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
/* Convenience wrappers: pack the symbol into a symconst_symbol union and
   delegate to new_rd_SymConst_type() with the matching symconst_kind.
   NOTE(review): elided -- the local "symconst_symbol sym;" declarations
   (and, for the type variants, the sym.type_p assignments) are missing. */
491 sym.entity_p = symbol;
492 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
493 } /* new_rd_SymConst_addr_ent */
495 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
498 sym.entity_p = symbol;
499 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
500 } /* new_rd_SymConst_ofs_ent */
502 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp)
505 sym.ident_p = symbol;
506 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_name, tp);
507 } /* new_rd_SymConst_addr_name */
509 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
513 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
514 } /* new_rd_SymConst_type_tag */
516 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
520 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
521 } /* new_rd_SymConst_size */
523 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
527 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
528 } /* new_rd_SymConst_align */
530 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
/* Builds an empty Sync first, then appends the predecessors one by one. */
533 ir_graph *rem = current_ir_graph;
536 current_ir_graph = get_Block_irg(block);
537 res = new_bd_Sync(db, block);
538 current_ir_graph = rem;
540 for (i = 0; i < arity; ++i)
541 add_Sync_pred(res, in[i]);
546 ir_node *new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
/* NOTE(review): duplicates the body of new_bd_EndReg() instead of
   delegating to it; kept as-is -- this elided listing cannot be safely
   restructured. */
550 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
551 set_irg_end_reg(irg, res);
552 IRN_VRFY_IRG(res, irg);
554 } /* new_rd_EndReg */
556 ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
560 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
561 set_irg_end_except(irg, res);
562 IRN_VRFY_IRG (res, irg);
564 } /* new_rd_EndExcept */
566 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
567 int arity, ir_node *in[], ir_asm_constraint *inputs,
568 int n_outs, ir_asm_constraint *outputs,
569 int n_clobber, ident *clobber[], ident *asm_text) {
571 ir_graph *rem = current_ir_graph;
573 current_ir_graph = get_Block_irg(block);
574 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
575 current_ir_graph = rem;
580 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
/* The new_r_* family: thin wrappers around the rd constructors with a NULL
   dbg_info.  new_r_Bad/new_r_NoMem return the graph's singleton nodes. */
582 return new_rd_Start(NULL, irg, block);
584 ir_node *new_r_End(ir_graph *irg, ir_node *block)
586 return new_rd_End(NULL, irg, block);
588 ir_node *new_r_Const(ir_graph *irg, tarval *con)
590 return new_rd_Const(NULL, irg, con);
592 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
594 return new_rd_Const_long(NULL, irg, mode, value);
596 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
598 return new_rd_Const_type(NULL, irg, con, tp);
600 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode,
601 symconst_symbol value, symconst_kind symkind) {
602 return new_rd_SymConst(NULL, irg, mode, value, symkind);
604 ir_node *new_r_simpleSel(ir_node *block, ir_node *store,
605 ir_node *objptr, ir_entity *ent) {
/* NOTE(review): calls new_rd_Sel() with 0 index operands although
   new_rd_simpleSel() exists above -- presumably equivalent; confirm. */
606 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
608 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
610 return new_rd_Phi(NULL, block, arity, in, mode);
612 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
614 return new_rd_Sync(NULL, block, arity, in);
616 ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj)
618 return new_rd_defaultProj(NULL, block, arg, max_proj);
620 ir_node *new_r_Bad(ir_graph *irg)
622 return get_irg_bad(irg);
624 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
626 return new_rd_EndReg(NULL, irg, block);
628 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
630 return new_rd_EndExcept(NULL, irg, block);
632 ir_node *new_r_NoMem(ir_graph *irg)
634 return get_irg_no_mem(irg);
636 ir_node *new_r_ASM(ir_node *block,
637 int arity, ir_node *in[], ir_asm_constraint *inputs,
638 int n_outs, ir_asm_constraint *outputs,
639 int n_clobber, ident *clobber[], ident *asm_text) {
640 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
643 /** ********************/
644 /** public interfaces */
645 /** construction tools */
647 ir_node *new_d_Start(dbg_info *db)
/* Builds the Start node directly in the current block of the graph under
   construction (no delegation to a bd constructor here). */
651 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
652 op_Start, mode_T, 0, NULL);
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, current_ir_graph);
659 ir_node *new_d_End(dbg_info *db)
662 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
663 op_End, mode_X, -1, NULL);
664 res = optimize_node(res);
665 IRN_VRFY_IRG(res, current_ir_graph);
670 /* ***********************************************************************/
671 /* Methods necessary for automatic Phi node creation */
673 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
674 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
675 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
676 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
678 Call Graph: ( A ---> B == A "calls" B)
680 get_value mature_immBlock
688 get_r_value_internal |
692 new_rd_Phi0 new_rd_Phi_in
694 * *************************************************************************** */
696 /** Creates a Phi node with 0 predecessors. */
697 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
/* Phi0 nodes are placeholders used while a block is immature; they are
   completed/exchanged later (see phi_merge()/mature_immBlock()).  They are
   deliberately not optimized on creation. */
701 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
702 IRN_VRFY_IRG(res, irg);
/* NOTE(review): elided listing -- several statements of this function
   (declarations, parts of the loop body, the single-predecessor path and
   the return statements) are missing between the visible lines. */
708 * Internal constructor of a Phi node by a phi_merge operation.
710 * @param irg the graph on which the Phi will be constructed
711 * @param block the block in which the Phi will be constructed
712 * @param mode the mode of the Phi node
713 * @param in the input array of the phi node
714 * @param ins number of elements in the input array
715 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
716 * the value for which the new Phi is constructed
718 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
719 ir_mode *mode, ir_node **in, int ins,
723 ir_node *res, *known;
725 /* Allocate a new node on the obstack. The allocation copies the in
727 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
728 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
730 /* This loop checks whether the Phi has more than one predecessor.
731 If so, it is a real Phi node and we break the loop. Else the
732 Phi node merges the same definition on several paths and therefore
734 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
736 for (i = ins - 1; i >= 0; --i) {
739 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
741 /* Optimize self referencing Phis: We can't detect them yet properly, as
742 they still refer to the Phi0 they will replace. So replace right now. */
743 if (phi0 && in[i] == phi0)
746 if (in[i] == res || in[i] == known)
755 /* i < 0: there is at most one predecessor, we don't need a phi node. */
758 edges_node_deleted(res, current_ir_graph);
/* the freshly allocated Phi can still be reclaimed from the obstack */
759 obstack_free(current_ir_graph->obst, res);
761 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
762 order, an enclosing Phi node may get superfluous. */
763 res = optimize_in_place_2(known);
765 exchange(known, res);
770 /* A undefined value, e.g., in unreachable code. */
774 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
775 IRN_VRFY_IRG(res, irg);
776 /* Memory Phis in endless loops must be kept alive.
777 As we can't distinguish these easily we keep all of them alive. */
778 if (is_Phi(res) && mode == mode_M)
779 add_End_keepalive(get_irg_end(irg), res);
783 } /* new_rd_Phi_in */
/* forward declarations for the mutually recursive SSA construction */
785 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
787 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
790 * Construct a new frag_array for node n.
791 * Copy the content from the current graph_arr of the corresponding block:
792 * this is the current state.
793 * Set ProjM(n) as current memory state.
794 * Further the last entry in frag_arr of current block points to n. This
795 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
797 static inline ir_node **new_frag_arr(ir_node *n)
/* NOTE(review): elided -- declarations of arr/opt and some control-flow
   lines (the if/else heads, set_optimize restore, return) are missing. */
802 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
803 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
804 sizeof(ir_node *)*current_ir_graph->n_loc);
806 /* turn off optimization before allocating Proj nodes, as res isn't
808 opt = get_opt_optimize(); set_optimize(0);
809 /* Here we rely on the fact that all frag ops have Memory as first result! */
811 arr[0] = new_Proj(n, mode_M, pn_Call_M);
812 } else if (is_CopyB(n)) {
813 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
/* all remaining fragile ops share the same memory-Proj number, so one
   Proj construction (pn_Alloc_M) covers every case -- the asserts below
   pin that invariant down */
815 assert((pn_Quot_M == pn_DivMod_M) &&
816 (pn_Quot_M == pn_Div_M) &&
817 (pn_Quot_M == pn_Mod_M) &&
818 (pn_Quot_M == pn_Load_M) &&
819 (pn_Quot_M == pn_Store_M) &&
820 (pn_Quot_M == pn_Alloc_M) &&
821 (pn_Quot_M == pn_Bound_M));
822 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
826 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
831 * Returns the frag_arr from a node.
833 static inline ir_node **get_frag_arr(ir_node *n)
835 switch (get_irn_opcode(n)) {
/* each fragile op stores its frag_arr in its op-specific exception attr;
   NOTE(review): the case labels are elided from this listing */
837 return n->attr.call.exc.frag_arr;
839 return n->attr.alloc.exc.frag_arr;
841 return n->attr.load.exc.frag_arr;
843 return n->attr.store.exc.frag_arr;
845 return n->attr.except.frag_arr;
849 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
/* Writes val at pos into frag_arr and into every chained frag_arr that has
   no value there yet.  A bounded counted loop replaces recursion; running
   into the bound trips the assert at the end. */
854 for (i = 1024; i >= 0; --i)
859 if (frag_arr[pos] == NULL)
/* follow the frag-op chain (last slot points to the next fragile op) */
861 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
862 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
863 assert(arr != frag_arr && "Endless recursion detected");
868 assert(!"potential endless recursion in set_frag_value");
869 } /* set_frag_value */
871 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
872 int pos, ir_mode *mode)
/* Returns the value at pos as visible after the fragile op cfOp (used when
   precise exception contexts are enabled). */
877 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
879 frag_arr = get_frag_arr(cfOp);
882 if (block->attr.block.graph_arr[pos] != NULL) {
883 /* There was a set_value() after the cfOp and no get_value() before that
884 set_value(). We must build a Phi node now. */
885 if (block->attr.block.is_matured) {
886 int ins = get_irn_arity(block);
888 NEW_ARR_A(ir_node *, nin, ins);
889 res = phi_merge(block, pos, mode, nin, ins);
891 res = new_rd_Phi0(current_ir_graph, block, mode);
/* immature block: queue the Phi0 on the block's phi list for later fix-up */
892 res->attr.phi.u.pos = pos;
893 res->attr.phi.next = block->attr.block.phis;
894 block->attr.block.phis = res;
897 /* It's a Phi, we can write this into all graph_arrs with NULL */
898 set_frag_value(block->attr.block.graph_arr, pos, res);
900 res = get_r_value_internal(block, pos, mode);
901 set_frag_value(block->attr.block.graph_arr, pos, res);
905 } /* get_r_frag_value_internal */
908 * Check whether a control flow node cf_pred represents an exception flow.
910 * @param cf_pred the control flow node
911 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
913 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
916 * Note: all projections from a raise are "exceptional control flow", so we handle it
917 * like a normal Jmp, because there is no "regular" one.
918 * That's why Raise is no "fragile_op"!
920 if (is_fragile_op(prev_cf_op)) {
921 if (is_Proj(cf_pred)) {
922 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
923 /* the regular control flow, NO exception */
926 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except)
/* anything else projected from a fragile op must be the exception exit */
929 /* Hmm, exception but not a Proj? */
930 assert(!"unexpected condition: fragile op without a proj");
934 } /* is_exception_flow */
937 * Computes the predecessors for the real phi node, and then
938 * allocates and returns this node. The routine called to allocate the
939 * node might optimize it away and return a real value.
940 * This function must be called with an in-array of proper size.
/* NOTE(review): elided listing -- declarations, braces and some statements
   of this function are missing between the visible lines. */
942 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
944 ir_node *prevBlock, *res, *phi0, *phi0_all;
947 /* If this block has no value at pos create a Phi0 and remember it
948 in graph_arr to break recursions.
949 Else we may not set graph_arr as there a later value is remembered. */
951 if (block->attr.block.graph_arr[pos] == NULL) {
952 ir_graph *irg = current_ir_graph;
954 if (block == get_irg_start_block(irg)) {
955 /* Collapsing to Bad tarvals is no good idea.
956 So we call a user-supplied routine here that deals with this case as
957 appropriate for the given language. Sadly the only help we can give
958 here is the position.
960 Even if all variables are defined before use, it can happen that
961 we get to the start block, if a Cond has been replaced by a tuple
962 (bad, jmp). In this case we call the function needlessly, eventually
963 generating a non-existent error.
964 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
967 if (default_initialize_local_variable != NULL) {
968 ir_node *rem = get_cur_block();
970 set_cur_block(block);
971 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
975 block->attr.block.graph_arr[pos] = new_Unknown(mode);
976 /* We don't need to care about exception ops in the start block.
977 There are none by definition. */
978 return block->attr.block.graph_arr[pos];
980 phi0 = new_rd_Phi0(irg, block, mode);
981 block->attr.block.graph_arr[pos] = phi0;
982 if (get_opt_precise_exc_context()) {
983 /* Set graph_arr for fragile ops. Also here we should break recursion.
984 We could choose a cyclic path through a cfop. But the recursion would
985 break at some point. */
986 set_frag_value(block->attr.block.graph_arr, pos, phi0);
991 /* This loop goes to all predecessor blocks of the block the Phi node
992 is in and there finds the operands of the Phi node by calling
993 get_r_value_internal. */
/* block->in[1..ins] are the CFG predecessors (in[0] is apparently not a
   predecessor -- consistent with the 1-based loop and nin[i-1] below) */
994 for (i = 1; i <= ins; ++i) {
995 ir_node *cf_pred = block->in[i];
996 ir_node *prevCfOp = skip_Proj(cf_pred);
998 if (is_Bad(prevCfOp)) {
999 /* In case a Cond has been optimized we would get right to the start block
1000 with an invalid definition. */
1001 nin[i-1] = new_Bad();
1004 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
1006 if (!is_Bad(prevBlock)) {
1007 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
1008 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
1009 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
1011 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
1013 nin[i-1] = new_Bad();
1017 /* We want to pass the Phi0 node to the constructor: this finds additional
1018 optimization possibilities.
1019 The Phi0 node either is allocated in this function, or it comes from
1020 a former call to get_r_value_internal(). In this case we may not yet
1021 exchange phi0, as this is done in mature_immBlock(). */
1023 phi0_all = block->attr.block.graph_arr[pos];
1024 if (! is_Phi0(phi0_all) ||
1025 get_irn_arity(phi0_all) != 0 ||
1026 get_nodes_block(phi0_all) != block)
1032 /* After collecting all predecessors into the array nin a new Phi node
1033 with these predecessors is created. This constructor contains an
1034 optimization: If all predecessors of the Phi node are identical it
1035 returns the only operand instead of a new Phi node. */
1036 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1038 /* In case we allocated a Phi0 node at the beginning of this procedure,
1039 we need to exchange this Phi0 with the real Phi. */
1041 exchange(phi0, res);
1042 block->attr.block.graph_arr[pos] = res;
1043 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1044 only an optimization. */
1051 * This function returns the last definition of a value. In case
1052 * this value was last defined in a previous block, Phi nodes are
1053 * inserted. If the part of the firm graph containing the definition
1054 * is not yet constructed, a dummy Phi node is returned.
1056 * @param block the current block
1057 * @param pos the value number of the value searched
1058 * @param mode the mode of this value (needed for Phi construction)
/* NOTE(review): elided listing -- the res declaration, some braces and the
   final return are missing between the visible lines. */
1060 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1063 /* There are 4 cases to treat.
1065 1. The block is not mature and we visit it the first time. We can not
1066 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1067 predecessors is returned. This node is added to the linked list (block
1068 attribute "phis") of the containing block to be completed when this block is
1069 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1072 2. The value is already known in this block, graph_arr[pos] is set and we
1073 visit the block the first time. We can return the value without
1074 creating any new nodes.
1076 3. The block is mature and we visit it the first time. A Phi node needs
1077 to be created (phi_merge). If the Phi is not needed, as all its
1078 operands are the same value reaching the block through different
1079 paths, it's optimized away and the value itself is returned.
1081 4. The block is mature, and we visit it the second time. Now two
1082 subcases are possible:
1083 * The value was computed completely the last time we were here. This
1084 is the case if there is no loop. We can return the proper value.
1085 * The recursion that visited this node and set the flag did not
1086 return yet. We are computing a value in a loop and need to
1087 break the recursion. This case only happens if we visited
1088 the same block with phi_merge before, which inserted a Phi0.
1089 So we return the Phi0.
1092 /* case 4 -- already visited. */
1093 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1094 /* As phi_merge allocates a Phi0 this value is always defined. Here
1095 is the critical difference of the two algorithms. */
1096 assert(block->attr.block.graph_arr[pos]);
1097 return block->attr.block.graph_arr[pos];
1100 /* visited the first time */
1101 set_irn_visited(block, get_irg_visited(current_ir_graph));
1103 /* Get the local valid value */
1104 res = block->attr.block.graph_arr[pos];
1106 /* case 2 -- If the value is actually computed, return it. */
1110 if (block->attr.block.is_matured) { /* case 3 */
1112 /* The Phi has the same amount of ins as the corresponding block. */
1113 int ins = get_irn_arity(block);
1115 NEW_ARR_A(ir_node *, nin, ins);
1117 /* Phi merge collects the predecessors and then creates a node. */
1118 res = phi_merge(block, pos, mode, nin, ins);
1120 } else { /* case 1 */
1121 /* The block is not mature, we don't know how many in's are needed. A Phi
1122 with zero predecessors is created. Such a Phi node is called Phi0
1123 node. The Phi0 is then added to the list of Phi0 nodes in this block
1124 to be matured by mature_immBlock later.
1125 The Phi0 has to remember the pos of its internal value. If the real
1126 Phi is computed, pos is used to update the array with the local
1128 res = new_rd_Phi0(current_ir_graph, block, mode);
1129 res->attr.phi.u.pos = pos;
1130 res->attr.phi.next = block->attr.block.phis;
1131 block->attr.block.phis = res;
1134 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1136 /* The local valid value is available now. */
1137 block->attr.block.graph_arr[pos] = res;
1140 } /* get_r_value_internal */
1142 /* ************************************************************************** */
/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 *
 * Fixes the number of predecessors of the block, replaces every Phi0
 * placeholder attached to the block by a real Phi node (via phi_merge)
 * and finally optimizes the block in place.  Idempotent: does nothing
 * on an already matured block.
 */
void mature_immBlock(ir_node *block)
{
	int ins;
	ir_node *n, **nin;
	ir_node *next;

	assert(is_Block(block));
	if (!get_Block_matured(block)) {
		ir_graph *irg = current_ir_graph;

		/* block->in[0] is the macroblock header, hence the -1 */
		ins = ARR_LEN(block->in) - 1;
		/* Fix block parameters */
		block->attr.block.backedge = new_backedge_arr(irg->obst, ins);

		/* An array for building the Phi nodes. */
		NEW_ARR_A(ir_node *, nin, ins);

		/* Traverse a chain of Phi nodes attached to this block and mature
		   these, too.  Each exchange() replaces a Phi0 by its real Phi. */
		for (n = block->attr.block.phis; n; n = next) {
			inc_irg_visited(irg);
			/* read the chain link before exchange() invalidates n */
			next = n->attr.phi.next;
			exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
		}

		block->attr.block.is_matured = 1;

		/* Now, as the block is a finished Firm node, we can optimize it.
		   Since other nodes have been allocated since the block was created
		   we can not free the node on the obstack. Therefore we have to call
		   optimize_in_place().
		   Unfortunately the optimization does not change a lot, as all allocated
		   nodes refer to the unoptimized node.
		   We can call optimize_in_place_2(), as global cse has no effect on blocks. */
		block = optimize_in_place_2(block);
		IRN_VRFY_IRG(block, irg);
	}
} /* mature_immBlock */
1187 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1189 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1192 ir_node *new_d_Const(dbg_info *db, tarval *con)
1194 return new_bd_Const(db, con);
1197 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1199 return new_bd_Const_long(db, mode, value);
1200 } /* new_d_Const_long */
1202 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1204 return new_bd_Const_type(db, con, tp);
1205 } /* new_d_Const_type */
/* Constructs the default Proj of a Cond node: records max_proj as the
   default projection number in the Cond's attributes and creates a
   mode_X Proj selecting it. */
ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
{
	ir_node *res;

	assert(arg->op == op_Cond);
	/* side effect: the Cond node remembers which Proj is the default one */
	arg->attr.cond.default_proj = max_proj;
	res = new_d_Proj(db, arg, mode_X, max_proj);
	return res;
} /* new_d_defaultProj */
/**
 * Allocate a frag array for a node if the current graph state is phase_building.
 *
 * @param irn        the node for which the frag array should be allocated
 * @param op         the opcode of the (original) node; if it does not match
 *                   the opcode of irn, nothing is done
 * @param frag_store the address of the frag store in irn attributes; if this
 *                   address contains a value != NULL, does nothing
 */
void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
{
	if (get_opt_precise_exc_context()) {
		if ((current_ir_graph->phase_state == phase_building) &&
		    (get_irn_op(irn) == op) && /* Could be optimized away. */
		    !*frag_store) /* Could be a cse where the arr is already set. */ {
			*frag_store = new_frag_arr(irn);
		}
	}
} /* firm_alloc_frag_arr */
1237 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1238 /* GL: objptr was called frame before. Frame was a bad choice for the name
1239 as the operand could as well be a pointer to a dynamic object. */
1241 return new_bd_Sel(db, current_ir_graph->current_block,
1242 store, objptr, 0, NULL, ent);
1243 } /* new_d_simpleSel */
1245 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1247 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1249 } /* new_d_SymConst_type */
1251 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1253 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1254 value, kind, firm_unknown_type);
1255 } /* new_d_SymConst */
1257 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1259 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1262 ir_node *new_d_EndReg(dbg_info *db)
1264 return new_bd_EndReg(db, current_ir_graph->current_block);
1265 } /* new_d_EndReg */
1267 ir_node *new_d_EndExcept(dbg_info *db)
1269 return new_bd_EndExcept(db, current_ir_graph->current_block);
1270 } /* new_d_EndExcept */
1273 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1274 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1275 ident *clobber[], ident *asm_text)
1277 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1280 /* ********************************************************************* */
1281 /* Comfortable interface with automatic Phi node construction. */
/* (Also uses constructors of the new_d_* interface, except new_Block.) */
1283 /* ********************************************************************* */
1285 /* Block construction */
1286 /* immature Block without predecessors */
/* Constructs a new immature (no predecessors yet) Block node.
   The block must later be finished with mature_immBlock(). */
ir_node *new_d_immBlock(dbg_info *db)
{
	ir_node *res;

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);

	/* macroblock head: an immature block is its own macroblock header */
	res->in[0] = res;

	res->attr.block.is_matured  = 0;
	res->attr.block.is_dead     = 0;
	res->attr.block.is_mb_head  = 1;
	res->attr.block.irg.irg     = current_ir_graph;
	res->attr.block.backedge    = NULL;
	res->attr.block.in_cg       = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk      = NULL;
	res->attr.block.region      = NULL;
	res->attr.block.mb_depth    = 0;
	res->attr.block.entity      = NULL;

	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
	                                      current_ir_graph->n_loc);
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

	/* Immature block may not be optimized! */
	IRN_VRFY_IRG(res, current_ir_graph);

	return res;
} /* new_d_immBlock */
1323 ir_node *new_immBlock(void)
1325 return new_d_immBlock(NULL);
1326 } /* new_immBlock */
/* immature PartBlock with its predecessors.
   A PartBlock continues the macroblock of the block that pred_jmp lives
   in; it shares that block's macroblock header and increases the depth. */
ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
{
	ir_node *res = new_d_immBlock(db);
	ir_node *blk = get_nodes_block(pred_jmp);

	/* inherit the macroblock header from the predecessor's block */
	res->in[0] = blk->in[0];
	assert(res->in[0] != NULL);
	add_immBlock_pred(res, pred_jmp);

	/* a PartBlock is not a macroblock header itself */
	res->attr.block.is_mb_head = 0;
	res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;

	return res;
} /* new_d_immPartBlock */
1344 ir_node *new_immPartBlock(ir_node *pred_jmp)
1346 return new_d_immPartBlock(NULL, pred_jmp);
1347 } /* new_immPartBlock */
/* add an edge to a jmp/control flow node: appends jmp as a new
   predecessor of the (still immature) block */
void add_immBlock_pred(ir_node *block, ir_node *jmp)
{
	/* index of the new predecessor; in[0] is the macroblock header */
	int n = ARR_LEN(block->in) - 1;

	assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
	assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
	assert(is_ir_node(jmp));

	ARR_APP1(ir_node *, block->in, jmp);
	/* Call the edge-change hook */
	hook_set_irn_n(block, n, jmp, NULL);
} /* add_immBlock_pred */
1363 /* changing the current block */
1364 void set_cur_block(ir_node *target)
1366 current_ir_graph->current_block = target;
1367 } /* set_cur_block */
1369 /* ************************ */
1370 /* parameter administration */
/* get a value from the parameter array from the current block by its index */
ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
{
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	/* start a fresh graph traversal for the SSA search */
	inc_irg_visited(irg);
	/* pos is shifted by one: slot 0 of graph_arr holds the memory state */
	return get_r_value_internal(irg->current_block, pos + 1, mode);
} /* get_d_value */
1385 /* get a value from the parameter array from the current block by its index */
1386 ir_node *get_value(int pos, ir_mode *mode)
1388 return get_d_value(NULL, pos, mode);
1391 /* set a value at position pos in the parameter array from the current block */
1392 void set_value(int pos, ir_node *value)
1394 ir_graph *irg = current_ir_graph;
1395 assert(get_irg_phase_state(irg) == phase_building);
1397 assert(pos+1 < irg->n_loc);
1398 assert(is_ir_node(value));
1399 irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* Find the value number for a node in the current block.
   Returns -1 if the node is not a local value of the current block.
   NOTE(review): graph_arr[0] holds the memory state and set_value()
   stores user values at pos + 1, so the index found here is shifted
   back by one — confirm against the full upstream source. */
int find_value(ir_node *value)
{
	int i;
	ir_node *bl = current_ir_graph->current_block;

	for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
		if (bl->attr.block.graph_arr[i] == value)
			return i - 1;
	return -1;
} /* find_value */
1414 /* get the current store */
1415 ir_node *get_store(void)
1417 ir_graph *irg = current_ir_graph;
1419 assert(get_irg_phase_state(irg) == phase_building);
1420 /* GL: one could call get_value instead */
1421 inc_irg_visited(irg);
1422 return get_r_value_internal(irg->current_block, 0, mode_M);
/* set the current store: handles automatic Sync construction for Load nodes.
   If the auto-sync option is enabled and the new store is (a Proj of) a
   non-volatile Load, consecutive Loads are made independent by hanging
   them off a common Sync node. */
void set_store(ir_node *store)
{
	ir_node *load, *pload, *pred, *in[2];

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

			if (is_Sync(pred)) {
				/* a Load after a Sync: move it up */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);
				/* the Sync stays the current memory state */
				store = pred;
			} else {
				pload = skip_Proj(pred);
				if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
					/* a Load after a Load: create a new Sync */
					set_Load_mem(load, get_Load_mem(pload));

					in[0] = store;
					in[1] = pred;
					store = new_Sync(2, in);
				}
			}
		}
	}
	/* slot 0 of the graph_arr holds the current memory state */
	current_ir_graph->current_block->attr.block.graph_arr[0] = store;
} /* set_store */
1464 void keep_alive(ir_node *ka)
1466 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1469 /* --- Useful access routines --- */
1470 /* Returns the current block of the current graph. To set the current
1471 block use set_cur_block. */
1472 ir_node *get_cur_block(void)
1474 return get_irg_current_block(current_ir_graph);
1475 } /* get_cur_block */
1477 /* Returns the frame type of the current graph */
1478 ir_type *get_cur_frame_type(void)
1480 return get_irg_frame_type(current_ir_graph);
1481 } /* get_cur_frame_type */
1484 /* ********************************************************************* */
1487 /* call once for each run of the library */
1488 void firm_init_cons(uninitialized_local_variable_func_t *func)
1490 default_initialize_local_variable = func;
1491 } /* firm_init_cons */
1493 void irp_finalize_cons(void)
1496 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1497 irg_finalize_cons(get_irp_irg(i));
1499 irp->phase_state = phase_high;
1500 } /* irp_finalize_cons */
1502 ir_node *new_Start(void)
1504 return new_d_Start(NULL);
1506 ir_node *new_End(void)
1508 return new_d_End(NULL);
1510 ir_node *new_Const(tarval *con)
1512 return new_d_Const(NULL, con);
1515 ir_node *new_Const_long(ir_mode *mode, long value)
1517 return new_d_Const_long(NULL, mode, value);
1520 ir_node *new_Const_type(tarval *con, ir_type *tp)
1522 return new_d_Const_type(NULL, con, tp);
1525 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1527 return new_d_SymConst_type(NULL, mode, value, kind, type);
1529 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1531 return new_d_SymConst(NULL, mode, value, kind);
1533 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1535 return new_d_simpleSel(NULL, store, objptr, ent);
1537 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1539 return new_d_Phi(NULL, arity, in, mode);
1541 ir_node *new_Sync(int arity, ir_node *in[])
1543 return new_d_Sync(NULL, arity, in);
1545 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1547 return new_d_defaultProj(NULL, arg, max_proj);
1549 ir_node *new_Bad(void)
1551 return get_irg_bad(current_ir_graph);
1553 ir_node *new_EndReg(void)
1555 return new_d_EndReg(NULL);
1557 ir_node *new_EndExcept(void)
1559 return new_d_EndExcept(NULL);
1561 ir_node *new_NoMem(void)
1563 return get_irg_no_mem(current_ir_graph);
1565 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1566 int n_outs, ir_asm_constraint *outputs,
1567 int n_clobber, ident *clobber[], ident *asm_text) {
1568 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1571 /* create a new anchor node */
1572 ir_node *new_Anchor(ir_graph *irg)
1574 ir_node *in[anchor_last];
1575 memset(in, 0, sizeof(in));
1576 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);