2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 /* when we need verifying */
49 # define IRN_VRFY_IRG(res, irg)
51 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
55 * Language dependent variable initialization callback.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
59 /* creates a bd constructor for a binop */
60 #define NEW_BD_BINOP(instr) \
62 new_bd_##instr(dbg_info *db, ir_node *block, \
63 ir_node *op1, ir_node *op2, ir_mode *mode) \
67 ir_graph *irg = current_ir_graph; \
70 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
71 res = optimize_node(res); \
72 IRN_VRFY_IRG(res, irg); \
76 /* creates a bd constructor for an unop */
77 #define NEW_BD_UNOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op, ir_mode *mode) \
83 ir_graph *irg = current_ir_graph; \
84 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
85 res = optimize_node(res); \
86 IRN_VRFY_IRG(res, irg); \
90 /* creates a bd constructor for an divop */
91 #define NEW_BD_DIVOP(instr) \
93 new_bd_##instr(dbg_info *db, ir_node *block, \
94 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
98 ir_graph *irg = current_ir_graph; \
102 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
103 res->attr.divmod.exc.pin_state = state; \
104 res->attr.divmod.resmode = mode; \
105 res->attr.divmod.no_remainder = 0; \
106 res = optimize_node(res); \
107 IRN_VRFY_IRG(res, irg); \
111 /* creates a rd constructor for a binop */
112 #define NEW_RD_BINOP(instr) \
114 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
115 ir_node *op1, ir_node *op2, ir_mode *mode) \
118 ir_graph *rem = current_ir_graph; \
119 current_ir_graph = irg; \
120 res = new_bd_##instr(db, block, op1, op2, mode); \
121 current_ir_graph = rem; \
125 /* creates a rd constructor for an unop */
126 #define NEW_RD_UNOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op, mode); \
135 current_ir_graph = rem; \
139 /* creates a rd constructor for an divop */
140 #define NEW_RD_DIVOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
149 current_ir_graph = rem; \
153 /* creates a d constructor for an binop */
154 #define NEW_D_BINOP(instr) \
156 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
157 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
160 /* creates a d constructor for an unop */
161 #define NEW_D_UNOP(instr) \
163 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
164 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
167 #include "gen_ir_cons.c.inl"
169 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
172 ir_graph *irg = current_ir_graph;
174 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
176 IRN_VRFY_IRG(res, irg);
180 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
183 ir_graph *irg = current_ir_graph;
185 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
187 IRN_VRFY_IRG(res, irg);
192 * Creates a Phi node with all predecessors. Calling this constructor
193 * is only allowed if the corresponding block is mature.
195 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
198 ir_graph *irg = current_ir_graph;
202 /* Don't assert that block matured: the use of this constructor is strongly
204 if (get_Block_matured(block))
205 assert(get_irn_arity(block) == arity);
207 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
209 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
211 for (i = arity - 1; i >= 0; --i)
212 if (is_Unknown(in[i])) {
217 if (!has_unknown) res = optimize_node(res);
218 IRN_VRFY_IRG(res, irg);
220 /* Memory Phis in endless loops must be kept alive.
221 As we can't distinguish these easily we keep all of them alive. */
222 if (is_Phi(res) && mode == mode_M)
223 add_End_keepalive(get_irg_end(irg), res);
227 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
230 ir_graph *irg = current_ir_graph;
232 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
233 res->attr.con.tv = con;
234 set_Const_type(res, tp); /* Call method because of complex assertion. */
235 res = optimize_node (res);
236 assert(get_Const_type(res) == tp);
237 IRN_VRFY_IRG(res, irg);
240 } /* new_bd_Const_type */
242 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
244 ir_graph *irg = current_ir_graph;
246 return new_rd_Const_type(db, irg, con, firm_unknown_type);
249 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
251 ir_graph *irg = current_ir_graph;
253 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
254 } /* new_bd_Const_long */
256 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
260 assert(arg->op == op_Cond);
261 arg->attr.cond.default_proj = max_proj;
262 res = new_rd_Proj(db, arg, mode_X, max_proj);
264 } /* new_bd_defaultProj */
266 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *objptr, int arity, ir_node **in,
273 ir_graph *irg = current_ir_graph;
274 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
282 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 * Sel's can select functions which should be of mode mode_P_code.
286 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 res->attr.sel.entity = ent;
288 res = optimize_node(res);
289 IRN_VRFY_IRG(res, irg);
293 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
294 ir_mode *mode, symconst_symbol value,
295 symconst_kind symkind, ir_type *tp)
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
309 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
321 static ir_node *new_bd_EndReg(dbg_info *db, ir_node *block)
324 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
327 set_irg_end_reg(irg, res);
328 IRN_VRFY_IRG(res, irg);
330 } /* new_bd_EndReg */
332 static ir_node *new_bd_EndExcept(dbg_info *db, ir_node *block)
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
338 set_irg_end_except(irg, res);
339 IRN_VRFY_IRG (res, irg);
341 } /* new_bd_EndExcept */
343 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
344 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
345 ir_asm_constraint *outputs, int n_clobber,
346 ident *clobber[], ident *asm_text)
349 ir_graph *irg = current_ir_graph;
351 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
352 res->attr.assem.pin_state = op_pin_state_pinned;
353 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
354 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
355 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
356 res->attr.assem.asm_text = asm_text;
358 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
359 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
360 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
367 /* --------------------------------------------- */
368 /* private interfaces, for professional use only */
369 /* --------------------------------------------- */
371 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
373 ir_graph *rem = current_ir_graph;
376 current_ir_graph = irg;
377 res = new_bd_Start(db, block);
378 current_ir_graph = rem;
383 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
386 ir_graph *rem = current_ir_graph;
388 current_ir_graph = irg;
389 res = new_bd_End(db, block);
390 current_ir_graph = rem;
395 /* Creates a Phi node with all predecessors. Calling this constructor
396 is only allowed if the corresponding block is mature. */
397 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
400 ir_graph *rem = current_ir_graph;
402 current_ir_graph = get_Block_irg(block);
403 res = new_bd_Phi(db, block,arity, in, mode);
404 current_ir_graph = rem;
409 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
412 ir_graph *rem = current_ir_graph;
414 current_ir_graph = irg;
415 res = new_bd_Const_type(db, con, tp);
416 current_ir_graph = rem;
419 } /* new_rd_Const_type */
421 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
424 //#ifdef USE_ORIGINAL
425 ir_graph *rem = current_ir_graph;
427 current_ir_graph = irg;
428 res = new_bd_Const_type(db, con, firm_unknown_type);
429 current_ir_graph = rem;
431 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
437 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
439 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
440 } /* new_rd_Const_long */
442 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
444 return new_bd_defaultProj(db, arg, max_proj);
445 } /* new_rd_defaultProj */
447 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
448 ir_node *objptr, ir_entity *ent)
451 ir_graph *rem = current_ir_graph;
453 current_ir_graph = get_Block_irg(block);
454 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
455 current_ir_graph = rem;
458 } /* new_rd_simpleSel */
460 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
461 symconst_symbol value, symconst_kind symkind,
465 ir_graph *rem = current_ir_graph;
466 ir_node *block = get_irg_start_block(irg);
468 current_ir_graph = irg;
469 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
470 current_ir_graph = rem;
473 } /* new_rd_SymConst_type */
475 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
476 symconst_symbol value, symconst_kind symkind)
478 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
479 } /* new_rd_SymConst */
481 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
484 sym.entity_p = symbol;
485 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
486 } /* new_rd_SymConst_addr_ent */
488 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
491 sym.entity_p = symbol;
492 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
493 } /* new_rd_SymConst_ofs_ent */
495 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
499 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
500 } /* new_rd_SymConst_type_tag */
502 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
506 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
507 } /* new_rd_SymConst_size */
509 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
513 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
514 } /* new_rd_SymConst_align */
516 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
519 ir_graph *rem = current_ir_graph;
522 current_ir_graph = get_Block_irg(block);
523 res = new_bd_Sync(db, block);
524 current_ir_graph = rem;
526 for (i = 0; i < arity; ++i)
527 add_Sync_pred(res, in[i]);
532 ir_node *new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
536 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
537 set_irg_end_reg(irg, res);
538 IRN_VRFY_IRG(res, irg);
540 } /* new_rd_EndReg */
542 ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
546 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
547 set_irg_end_except(irg, res);
548 IRN_VRFY_IRG (res, irg);
550 } /* new_rd_EndExcept */
552 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
553 int arity, ir_node *in[], ir_asm_constraint *inputs,
554 int n_outs, ir_asm_constraint *outputs,
555 int n_clobber, ident *clobber[], ident *asm_text)
558 ir_graph *rem = current_ir_graph;
560 current_ir_graph = get_Block_irg(block);
561 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
562 current_ir_graph = rem;
567 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
569 return new_rd_Start(NULL, irg, block);
571 ir_node *new_r_End(ir_graph *irg, ir_node *block)
573 return new_rd_End(NULL, irg, block);
575 ir_node *new_r_Const(ir_graph *irg, tarval *con)
577 return new_rd_Const(NULL, irg, con);
579 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
581 return new_rd_Const_long(NULL, irg, mode, value);
583 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
585 return new_rd_Const_type(NULL, irg, con, tp);
587 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
588 symconst_kind symkind)
590 return new_rd_SymConst(NULL, irg, mode, value, symkind);
592 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
595 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
597 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
599 return new_rd_Phi(NULL, block, arity, in, mode);
601 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
603 return new_rd_Sync(NULL, block, arity, in);
605 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
607 return new_rd_defaultProj(NULL, arg, max_proj);
609 ir_node *new_r_Bad(ir_graph *irg)
611 return get_irg_bad(irg);
613 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
615 return new_rd_EndReg(NULL, irg, block);
617 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
619 return new_rd_EndExcept(NULL, irg, block);
621 ir_node *new_r_NoMem(ir_graph *irg)
623 return get_irg_no_mem(irg);
625 ir_node *new_r_ASM(ir_node *block,
626 int arity, ir_node *in[], ir_asm_constraint *inputs,
627 int n_outs, ir_asm_constraint *outputs,
628 int n_clobber, ident *clobber[], ident *asm_text)
630 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
633 /** ********************/
634 /** public interfaces */
635 /** construction tools */
637 ir_node *new_d_Start(dbg_info *db)
641 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
642 op_Start, mode_T, 0, NULL);
644 res = optimize_node(res);
645 IRN_VRFY_IRG(res, current_ir_graph);
649 ir_node *new_d_End(dbg_info *db)
652 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
653 op_End, mode_X, -1, NULL);
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, current_ir_graph);
660 /* ***********************************************************************/
661 /* Methods necessary for automatic Phi node creation */
663 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
664 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
665 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
666 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
668 Call Graph: ( A ---> B == A "calls" B)
670 get_value mature_immBlock
678 get_r_value_internal |
682 new_rd_Phi0 new_rd_Phi_in
684 * *************************************************************************** */
686 /** Creates a Phi node with 0 predecessors. */
687 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
691 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
692 IRN_VRFY_IRG(res, irg);
698 * Internal constructor of a Phi node by a phi_merge operation.
700 * @param irg the graph on which the Phi will be constructed
701 * @param block the block in which the Phi will be constructed
702 * @param mode the mod eof the Phi node
703 * @param in the input array of the phi node
704 * @param ins number of elements in the input array
705 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
706 * the value for which the new Phi is constructed
708 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
709 ir_mode *mode, ir_node **in, int ins,
713 ir_node *res, *known;
715 /* Allocate a new node on the obstack. The allocation copies the in
717 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
718 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
720 /* This loop checks whether the Phi has more than one predecessor.
721 If so, it is a real Phi node and we break the loop. Else the
722 Phi node merges the same definition on several paths and therefore
724 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
726 for (i = ins - 1; i >= 0; --i) {
729 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
731 /* Optimize self referencing Phis: We can't detect them yet properly, as
732 they still refer to the Phi0 they will replace. So replace right now. */
733 if (phi0 && in[i] == phi0)
736 if (in[i] == res || in[i] == known)
745 /* i < 0: there is at most one predecessor, we don't need a phi node. */
748 edges_node_deleted(res, current_ir_graph);
749 obstack_free(current_ir_graph->obst, res);
751 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
752 order, an enclosing Phi know may get superfluous. */
753 res = optimize_in_place_2(known);
755 exchange(known, res);
760 /* A undefined value, e.g., in unreachable code. */
764 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
765 IRN_VRFY_IRG(res, irg);
766 /* Memory Phis in endless loops must be kept alive.
767 As we can't distinguish these easily we keep all of them alive. */
768 if (is_Phi(res) && mode == mode_M)
769 add_End_keepalive(get_irg_end(irg), res);
773 } /* new_rd_Phi_in */
775 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
777 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
780 * Construct a new frag_array for node n.
781 * Copy the content from the current graph_arr of the corresponding block:
782 * this is the current state.
783 * Set ProjM(n) as current memory state.
784 * Further the last entry in frag_arr of current block points to n. This
785 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
787 static inline ir_node **new_frag_arr(ir_node *n)
792 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
793 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
794 sizeof(ir_node *)*current_ir_graph->n_loc);
796 /* turn off optimization before allocating Proj nodes, as res isn't
798 opt = get_opt_optimize(); set_optimize(0);
799 /* Here we rely on the fact that all frag ops have Memory as first result! */
801 arr[0] = new_Proj(n, mode_M, pn_Call_M);
802 } else if (is_CopyB(n)) {
803 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
805 assert((pn_Quot_M == pn_DivMod_M) &&
806 (pn_Quot_M == pn_Div_M) &&
807 (pn_Quot_M == pn_Mod_M) &&
808 (pn_Quot_M == pn_Load_M) &&
809 (pn_Quot_M == pn_Store_M) &&
810 (pn_Quot_M == pn_Alloc_M) &&
811 (pn_Quot_M == pn_Bound_M));
812 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
816 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
821 * Returns the frag_arr from a node.
823 static inline ir_node **get_frag_arr(ir_node *n)
825 switch (get_irn_opcode(n)) {
827 return n->attr.call.exc.frag_arr;
829 return n->attr.alloc.exc.frag_arr;
831 return n->attr.load.exc.frag_arr;
833 return n->attr.store.exc.frag_arr;
835 return n->attr.except.frag_arr;
839 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
844 for (i = 1024; i >= 0; --i)
849 if (frag_arr[pos] == NULL)
851 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
852 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
853 assert(arr != frag_arr && "Endless recursion detected");
858 assert(!"potential endless recursion in set_frag_value");
859 } /* set_frag_value */
861 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
862 int pos, ir_mode *mode)
867 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
869 frag_arr = get_frag_arr(cfOp);
872 if (block->attr.block.graph_arr[pos] != NULL) {
873 /* There was a set_value() after the cfOp and no get_value() before that
874 set_value(). We must build a Phi node now. */
875 if (block->attr.block.is_matured) {
876 int ins = get_irn_arity(block);
878 NEW_ARR_A(ir_node *, nin, ins);
879 res = phi_merge(block, pos, mode, nin, ins);
881 res = new_rd_Phi0(current_ir_graph, block, mode);
882 res->attr.phi.u.pos = pos;
883 res->attr.phi.next = block->attr.block.phis;
884 block->attr.block.phis = res;
887 /* It's a Phi, we can write this into all graph_arrs with NULL */
888 set_frag_value(block->attr.block.graph_arr, pos, res);
890 res = get_r_value_internal(block, pos, mode);
891 set_frag_value(block->attr.block.graph_arr, pos, res);
895 } /* get_r_frag_value_internal */
898 * Check whether a control flownode cf_pred represents an exception flow.
900 * @param cf_pred the control flow node
901 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
903 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
906 * Note: all projections from a raise are "exceptional control flow" we we handle it
907 * like a normal Jmp, because there is no "regular" one.
908 * That's why Raise is no "fragile_op"!
910 if (is_fragile_op(prev_cf_op)) {
911 if (is_Proj(cf_pred)) {
912 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
913 /* the regular control flow, NO exception */
916 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
919 /* Hmm, exception but not a Proj? */
920 panic("unexpected condition: fragile op without a proj");
923 } /* is_exception_flow */
926 * Computes the predecessors for the real phi node, and then
927 * allocates and returns this node. The routine called to allocate the
928 * node might optimize it away and return a real value.
929 * This function must be called with an in-array of proper size.
931 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
933 ir_node *prevBlock, *res, *phi0, *phi0_all;
936 /* If this block has no value at pos create a Phi0 and remember it
937 in graph_arr to break recursions.
938 Else we may not set graph_arr as there a later value is remembered. */
940 if (block->attr.block.graph_arr[pos] == NULL) {
941 ir_graph *irg = current_ir_graph;
943 if (block == get_irg_start_block(irg)) {
944 /* Collapsing to Bad tarvals is no good idea.
945 So we call a user-supplied routine here that deals with this case as
946 appropriate for the given language. Sorrily the only help we can give
947 here is the position.
949 Even if all variables are defined before use, it can happen that
950 we get to the start block, if a Cond has been replaced by a tuple
951 (bad, jmp). In this case we call the function needlessly, eventually
952 generating an non existent error.
953 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
956 if (default_initialize_local_variable != NULL) {
957 ir_node *rem = get_cur_block();
959 set_cur_block(block);
960 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
964 block->attr.block.graph_arr[pos] = new_Unknown(mode);
965 /* We don't need to care about exception ops in the start block.
966 There are none by definition. */
967 return block->attr.block.graph_arr[pos];
969 phi0 = new_rd_Phi0(irg, block, mode);
970 block->attr.block.graph_arr[pos] = phi0;
971 if (get_opt_precise_exc_context()) {
972 /* Set graph_arr for fragile ops. Also here we should break recursion.
973 We could choose a cyclic path through an cfop. But the recursion would
974 break at some point. */
975 set_frag_value(block->attr.block.graph_arr, pos, phi0);
980 /* This loop goes to all predecessor blocks of the block the Phi node
981 is in and there finds the operands of the Phi node by calling
982 get_r_value_internal. */
983 for (i = 1; i <= ins; ++i) {
984 ir_node *cf_pred = block->in[i];
985 ir_node *prevCfOp = skip_Proj(cf_pred);
987 if (is_Bad(prevCfOp)) {
988 /* In case a Cond has been optimized we would get right to the start block
989 with an invalid definition. */
990 nin[i-1] = new_Bad();
993 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
995 if (!is_Bad(prevBlock)) {
996 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
997 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
998 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
1000 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
1002 nin[i-1] = new_Bad();
1006 /* We want to pass the Phi0 node to the constructor: this finds additional
1007 optimization possibilities.
1008 The Phi0 node either is allocated in this function, or it comes from
1009 a former call to get_r_value_internal(). In this case we may not yet
1010 exchange phi0, as this is done in mature_immBlock(). */
1012 phi0_all = block->attr.block.graph_arr[pos];
1013 if (! is_Phi0(phi0_all) ||
1014 get_irn_arity(phi0_all) != 0 ||
1015 get_nodes_block(phi0_all) != block)
1021 /* After collecting all predecessors into the array nin a new Phi node
1022 with these predecessors is created. This constructor contains an
1023 optimization: If all predecessors of the Phi node are identical it
1024 returns the only operand instead of a new Phi node. */
1025 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1027 /* In case we allocated a Phi0 node at the beginning of this procedure,
1028 we need to exchange this Phi0 with the real Phi. */
1030 exchange(phi0, res);
1031 block->attr.block.graph_arr[pos] = res;
1032 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1033 only an optimization. */
1040 * This function returns the last definition of a value. In case
1041 * this value was last defined in a previous block, Phi nodes are
1042 * inserted. If the part of the firm graph containing the definition
1043 * is not yet constructed, a dummy Phi node is returned.
1045 * @param block the current block
1046 * @param pos the value number of the value searched
1047 * @param mode the mode of this value (needed for Phi construction)
1049 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1052 /* There are 4 cases to treat.
1054 1. The block is not mature and we visit it the first time. We can not
1055 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1056 predecessors is returned. This node is added to the linked list (block
1057 attribute "phis") of the containing block to be completed when this block is
1058 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1061 2. The value is already known in this block, graph_arr[pos] is set and we
1062 visit the block the first time. We can return the value without
1063 creating any new nodes.
1065 3. The block is mature and we visit it the first time. A Phi node needs
1066 to be created (phi_merge). If the Phi is not needed, as all it's
1067 operands are the same value reaching the block through different
1068 paths, it's optimized away and the value itself is returned.
1070 4. The block is mature, and we visit it the second time. Now two
1071 subcases are possible:
1072 * The value was computed completely the last time we were here. This
1073 is the case if there is no loop. We can return the proper value.
1074 * The recursion that visited this node and set the flag did not
1075 return yet. We are computing a value in a loop and need to
1076 break the recursion. This case only happens if we visited
1077 the same block with phi_merge before, which inserted a Phi0.
1078 So we return the Phi0.
1081 /* case 4 -- already visited. */
1082 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1083 /* As phi_merge allocates a Phi0 this value is always defined. Here
1084 is the critical difference of the two algorithms. */
1085 assert(block->attr.block.graph_arr[pos]);
1086 return block->attr.block.graph_arr[pos];
1089 /* visited the first time */
1090 set_irn_visited(block, get_irg_visited(current_ir_graph));
1092 /* Get the local valid value */
1093 res = block->attr.block.graph_arr[pos];
1095 /* case 2 -- If the value is actually computed, return it. */
1099 if (block->attr.block.is_matured) { /* case 3 */
1101 /* The Phi has the same amount of ins as the corresponding block. */
1102 int ins = get_irn_arity(block);
1104 NEW_ARR_A(ir_node *, nin, ins);
1106 /* Phi merge collects the predecessors and then creates a node. */
1107 res = phi_merge(block, pos, mode, nin, ins);
1109 } else { /* case 1 */
1110 /* The block is not mature, we don't know how many in's are needed. A Phi
1111 with zero predecessors is created. Such a Phi node is called Phi0
1112 node. The Phi0 is then added to the list of Phi0 nodes in this block
1113 to be matured by mature_immBlock later.
1114 The Phi0 has to remember the pos of it's internal value. If the real
1115 Phi is computed, pos is used to update the array with the local
1117 res = new_rd_Phi0(current_ir_graph, block, mode);
1118 res->attr.phi.u.pos = pos;
1119 res->attr.phi.next = block->attr.block.phis;
1120 block->attr.block.phis = res;
1123 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1125 /* The local valid value is available now. */
1126 block->attr.block.graph_arr[pos] = res;
1129 } /* get_r_value_internal */
1131 /* ************************************************************************** */
1134 * Finalize a Block node, when all control flows are known.
1135 * Acceptable parameters are only Block nodes.
1137 void mature_immBlock(ir_node *block)
1143 assert(is_Block(block));
1144 if (!get_Block_matured(block)) {
1145 ir_graph *irg = current_ir_graph;
1147 ins = ARR_LEN(block->in) - 1;
1148 /* Fix block parameters */
1149 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1151 /* An array for building the Phi nodes. */
1152 NEW_ARR_A(ir_node *, nin, ins);
1154 /* Traverse a chain of Phi nodes attached to this block and mature
1156 for (n = block->attr.block.phis; n; n = next) {
1157 inc_irg_visited(irg);
1158 next = n->attr.phi.next;
1159 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1162 block->attr.block.is_matured = 1;
1164 /* Now, as the block is a finished Firm node, we can optimize it.
1165 Since other nodes have been allocated since the block was created
1166 we can not free the node on the obstack. Therefore we have to call
1167 optimize_in_place().
1168 Unfortunately the optimization does not change a lot, as all allocated
1169 nodes refer to the unoptimized node.
1170 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1171 block = optimize_in_place_2(block);
1172 IRN_VRFY_IRG(block, irg);
1174 } /* mature_immBlock */
1176 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1178 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1181 ir_node *new_d_Const(dbg_info *db, tarval *con)
1183 return new_bd_Const(db, con);
1186 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1188 return new_bd_Const_long(db, mode, value);
1189 } /* new_d_Const_long */
1191 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1193 return new_bd_Const_type(db, con, tp);
1194 } /* new_d_Const_type */
1197 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1200 assert(arg->op == op_Cond);
1201 arg->attr.cond.default_proj = max_proj;
1202 res = new_d_Proj(db, arg, mode_X, max_proj);
1204 } /* new_d_defaultProj */
1207 * Allocate a frag array for a node if the current graph state is phase_building.
1209 * @param irn the node for which the frag array should be allocated
 * @param op         the opcode of the (original) node; if it does not match
 *                   the opcode of irn, nothing is done
 * @param frag_store the address of the frag store in irn attributes; if this
 *                   address contains a value != NULL, does nothing
1215 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1217 if (get_opt_precise_exc_context()) {
1218 if ((current_ir_graph->phase_state == phase_building) &&
1219 (get_irn_op(irn) == op) && /* Could be optimized away. */
1220 !*frag_store) /* Could be a cse where the arr is already set. */ {
1221 *frag_store = new_frag_arr(irn);
1224 } /* firm_alloc_frag_arr */
1226 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1227 /* GL: objptr was called frame before. Frame was a bad choice for the name
1228 as the operand could as well be a pointer to a dynamic object. */
1230 return new_bd_Sel(db, current_ir_graph->current_block,
1231 store, objptr, 0, NULL, ent);
1232 } /* new_d_simpleSel */
1234 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1236 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1238 } /* new_d_SymConst_type */
1240 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1242 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1243 value, kind, firm_unknown_type);
1244 } /* new_d_SymConst */
1246 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1248 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1251 ir_node *new_d_EndReg(dbg_info *db)
1253 return new_bd_EndReg(db, current_ir_graph->current_block);
1254 } /* new_d_EndReg */
1256 ir_node *new_d_EndExcept(dbg_info *db)
1258 return new_bd_EndExcept(db, current_ir_graph->current_block);
1259 } /* new_d_EndExcept */
1262 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1263 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1264 ident *clobber[], ident *asm_text)
1266 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1269 /* ********************************************************************* */
1270 /* Comfortable interface with automatic Phi node construction. */
1271 /* (Uses also constructors of ?? interface, except new_Block. */
1272 /* ********************************************************************* */
1274 /* Block construction */
1275 /* immature Block without predecessors */
1276 ir_node *new_d_immBlock(dbg_info *db)
1280 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1281 /* creates a new dynamic in-array as length of in is -1 */
1282 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1284 /* macroblock head */
1287 res->attr.block.is_matured = 0;
1288 res->attr.block.is_dead = 0;
1289 res->attr.block.is_mb_head = 1;
1290 res->attr.block.irg.irg = current_ir_graph;
1291 res->attr.block.backedge = NULL;
1292 res->attr.block.in_cg = NULL;
1293 res->attr.block.cg_backedge = NULL;
1294 res->attr.block.extblk = NULL;
1295 res->attr.block.region = NULL;
1296 res->attr.block.mb_depth = 0;
1297 res->attr.block.entity = NULL;
1299 set_Block_block_visited(res, 0);
1301 /* Create and initialize array for Phi-node construction. */
1302 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1303 current_ir_graph->n_loc);
1304 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1306 /* Immature block may not be optimized! */
1307 IRN_VRFY_IRG(res, current_ir_graph);
1310 } /* new_d_immBlock */
1312 ir_node *new_immBlock(void)
1314 return new_d_immBlock(NULL);
1315 } /* new_immBlock */
1317 /* immature PartBlock with its predecessors */
1318 ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
1320 ir_node *res = new_d_immBlock(db);
1321 ir_node *blk = get_nodes_block(pred_jmp);
1323 res->in[0] = blk->in[0];
1324 assert(res->in[0] != NULL);
1325 add_immBlock_pred(res, pred_jmp);
1327 res->attr.block.is_mb_head = 0;
1328 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1331 } /* new_d_immPartBlock */
1333 ir_node *new_immPartBlock(ir_node *pred_jmp)
1335 return new_d_immPartBlock(NULL, pred_jmp);
1336 } /* new_immPartBlock */
1338 /* add an edge to a jmp/control flow node */
1339 void add_immBlock_pred(ir_node *block, ir_node *jmp)
1341 int n = ARR_LEN(block->in) - 1;
1343 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1344 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1345 assert(is_ir_node(jmp));
1347 ARR_APP1(ir_node *, block->in, jmp);
1349 hook_set_irn_n(block, n, jmp, NULL);
1350 } /* add_immBlock_pred */
1352 /* changing the current block */
1353 void set_cur_block(ir_node *target)
1355 current_ir_graph->current_block = target;
1356 } /* set_cur_block */
1358 /* ************************ */
1359 /* parameter administration */
1361 /* get a value from the parameter array from the current block by its index */
1362 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
1364 ir_graph *irg = current_ir_graph;
1365 assert(get_irg_phase_state(irg) == phase_building);
1366 inc_irg_visited(irg);
1371 return get_r_value_internal(irg->current_block, pos + 1, mode);
1374 /* get a value from the parameter array from the current block by its index */
1375 ir_node *get_value(int pos, ir_mode *mode)
1377 return get_d_value(NULL, pos, mode);
1380 /* set a value at position pos in the parameter array from the current block */
1381 void set_value(int pos, ir_node *value)
1383 ir_graph *irg = current_ir_graph;
1384 assert(get_irg_phase_state(irg) == phase_building);
1386 assert(pos+1 < irg->n_loc);
1387 assert(is_ir_node(value));
1388 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1391 /* get the current store */
1392 ir_node *get_store(void)
1394 ir_graph *irg = current_ir_graph;
1396 assert(get_irg_phase_state(irg) == phase_building);
1397 /* GL: one could call get_value instead */
1398 inc_irg_visited(irg);
1399 return get_r_value_internal(irg->current_block, 0, mode_M);
1402 /* set the current store: handles automatic Sync construction for Load nodes */
1403 void set_store(ir_node *store)
1405 ir_node *load, *pload, *pred, *in[2];
1407 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1408 /* Beware: due to dead code elimination, a store might become a Bad node even in
1409 the construction phase. */
1410 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1412 if (get_opt_auto_create_sync()) {
1413 /* handle non-volatile Load nodes by automatically creating Sync's */
1414 load = skip_Proj(store);
1415 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1416 pred = get_Load_mem(load);
1418 if (is_Sync(pred)) {
1419 /* a Load after a Sync: move it up */
1420 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1422 set_Load_mem(load, get_memop_mem(mem));
1423 add_Sync_pred(pred, store);
1426 pload = skip_Proj(pred);
1427 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1428 /* a Load after a Load: create a new Sync */
1429 set_Load_mem(load, get_Load_mem(pload));
1433 store = new_Sync(2, in);
1438 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1441 void keep_alive(ir_node *ka)
1443 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1446 /* --- Useful access routines --- */
1447 /* Returns the current block of the current graph. To set the current
1448 block use set_cur_block. */
1449 ir_node *get_cur_block(void)
1451 return get_irg_current_block(current_ir_graph);
1452 } /* get_cur_block */
1454 /* Returns the frame type of the current graph */
1455 ir_type *get_cur_frame_type(void)
1457 return get_irg_frame_type(current_ir_graph);
1458 } /* get_cur_frame_type */
1461 /* ********************************************************************* */
1464 /* call once for each run of the library */
1465 void firm_init_cons(uninitialized_local_variable_func_t *func)
1467 default_initialize_local_variable = func;
1468 } /* firm_init_cons */
1470 void irp_finalize_cons(void)
1473 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1474 irg_finalize_cons(get_irp_irg(i));
1476 irp->phase_state = phase_high;
1477 } /* irp_finalize_cons */
1479 ir_node *new_Start(void)
1481 return new_d_Start(NULL);
1483 ir_node *new_End(void)
1485 return new_d_End(NULL);
1487 ir_node *new_Const(tarval *con)
1489 return new_d_Const(NULL, con);
1492 ir_node *new_Const_long(ir_mode *mode, long value)
1494 return new_d_Const_long(NULL, mode, value);
1497 ir_node *new_Const_type(tarval *con, ir_type *tp)
1499 return new_d_Const_type(NULL, con, tp);
1502 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1504 return new_d_SymConst_type(NULL, mode, value, kind, type);
1506 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1508 return new_d_SymConst(NULL, mode, value, kind);
1510 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1512 return new_d_simpleSel(NULL, store, objptr, ent);
1514 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1516 return new_d_Phi(NULL, arity, in, mode);
1518 ir_node *new_Sync(int arity, ir_node *in[])
1520 return new_d_Sync(NULL, arity, in);
1522 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1524 return new_d_defaultProj(NULL, arg, max_proj);
1526 ir_node *new_Bad(void)
1528 return get_irg_bad(current_ir_graph);
1530 ir_node *new_EndReg(void)
1532 return new_d_EndReg(NULL);
1534 ir_node *new_EndExcept(void)
1536 return new_d_EndExcept(NULL);
1538 ir_node *new_NoMem(void)
1540 return get_irg_no_mem(current_ir_graph);
1542 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1543 int n_outs, ir_asm_constraint *outputs,
1544 int n_clobber, ident *clobber[], ident *asm_text)
1546 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1549 /* create a new anchor node */
1550 ir_node *new_Anchor(ir_graph *irg)
1552 ir_node *in[anchor_last];
1553 memset(in, 0, sizeof(in));
1554 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);