2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
46 /* when we need verifying */
48 # define IRN_VRFY_IRG(res, irg)
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
58 /* creates a bd constructor for a binop */
59 #define NEW_BD_BINOP(instr) \
61 new_bd_##instr(dbg_info *db, ir_node *block, \
62 ir_node *op1, ir_node *op2, ir_mode *mode) \
66 ir_graph *irg = current_ir_graph; \
69 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
70 res = optimize_node(res); \
71 IRN_VRFY_IRG(res, irg); \
75 /* creates a bd constructor for an unop */
76 #define NEW_BD_UNOP(instr) \
78 new_bd_##instr(dbg_info *db, ir_node *block, \
79 ir_node *op, ir_mode *mode) \
82 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an divop */
90 #define NEW_BD_DIVOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
97 ir_graph *irg = current_ir_graph; \
101 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
102 res->attr.divmod.exc.pin_state = state; \
103 res->attr.divmod.resmode = mode; \
104 res->attr.divmod.no_remainder = 0; \
105 res = optimize_node(res); \
106 IRN_VRFY_IRG(res, irg); \
110 /* creates a rd constructor for a binop */
111 #define NEW_RD_BINOP(instr) \
113 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
114 ir_node *op1, ir_node *op2, ir_mode *mode) \
117 ir_graph *rem = current_ir_graph; \
118 current_ir_graph = irg; \
119 res = new_bd_##instr(db, block, op1, op2, mode); \
120 current_ir_graph = rem; \
124 /* creates a rd constructor for an unop */
125 #define NEW_RD_UNOP(instr) \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
128 ir_node *op, ir_mode *mode) \
131 ir_graph *rem = current_ir_graph; \
132 current_ir_graph = irg; \
133 res = new_bd_##instr(db, block, op, mode); \
134 current_ir_graph = rem; \
138 /* creates a rd constructor for an divop */
139 #define NEW_RD_DIVOP(instr) \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
142 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
145 ir_graph *rem = current_ir_graph; \
146 current_ir_graph = irg; \
147 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
148 current_ir_graph = rem; \
152 /* creates a d constructor for an binop */
153 #define NEW_D_BINOP(instr) \
155 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
156 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
159 /* creates a d constructor for an unop */
160 #define NEW_D_UNOP(instr) \
162 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
163 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
166 #include "gen_ir_cons.c.inl"
168 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
171 ir_graph *irg = current_ir_graph;
173 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175 IRN_VRFY_IRG(res, irg);
179 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186 IRN_VRFY_IRG(res, irg);
191 * Creates a Phi node with all predecessors. Calling this constructor
192 * is only allowed if the corresponding block is mature.
194 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
197 ir_graph *irg = current_ir_graph;
201 /* Don't assert that block matured: the use of this constructor is strongly
203 if (get_Block_matured(block))
204 assert(get_irn_arity(block) == arity);
206 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* One backedge flag per predecessor, allocated on the graph's obstack. */
208 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; an Unknown input suppresses the
   optimize_node() call below (presumably via has_unknown — the flag's
   declaration is not visible here; TODO confirm). */
210 for (i = arity - 1; i >= 0; --i)
211 if (is_Unknown(in[i])) {
216 if (!has_unknown) res = optimize_node(res);
217 IRN_VRFY_IRG(res, irg);
219 /* Memory Phis in endless loops must be kept alive.
220 As we can't distinguish these easily we keep all of them alive. */
221 if (is_Phi(res) && mode == mode_M)
222 add_End_keepalive(get_irg_end(irg), res);
/* Creates a Const node carrying tarval 'con' with type 'tp'.  The node is
   always placed in the start block of the current graph; its mode is taken
   from the tarval. */
226 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
229 ir_graph *irg = current_ir_graph;
231 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232 res->attr.con.tv = con;
233 set_Const_type(res, tp); /* Call method because of complex assertion. */
234 res = optimize_node (res);
/* optimize_node() may CSE to an existing Const; it must still carry 'tp'. */
235 assert(get_Const_type(res) == tp);
236 IRN_VRFY_IRG(res, irg);
239 } /* new_bd_Const_type */
241 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
243 ir_graph *irg = current_ir_graph;
245 return new_rd_Const_type(db, irg, con, firm_unknown_type);
248 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
250 ir_graph *irg = current_ir_graph;
252 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 } /* new_bd_Const_long */
255 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
260 assert(arg->op == op_Cond);
261 arg->attr.cond.default_proj = max_proj;
262 res = new_rd_Proj(db, block, arg, mode_X, max_proj);
264 } /* new_bd_defaultProj */
/* Creates a Sel node selecting entity 'ent' from 'objptr' under memory
   state 'store', with 'arity' extra index operands copied from 'in'.
   NOTE(review): the first two slots of r_in presumably hold store and
   objptr (those assignments are not visible in this view — confirm). */
266 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *objptr, int arity, ir_node **in,
273 ir_graph *irg = current_ir_graph;
/* Method entities yield code pointers, data entities data pointers. */
274 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
282 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 * Sel's can select functions which should be of mode mode_P_code.
286 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 res->attr.sel.entity = ent;
288 res = optimize_node(res);
289 IRN_VRFY_IRG(res, irg);
293 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
294 ir_mode *mode, symconst_symbol value,
295 symconst_kind symkind, ir_type *tp)
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
309 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
321 static ir_node *new_bd_EndReg(dbg_info *db, ir_node *block)
324 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
327 set_irg_end_reg(irg, res);
328 IRN_VRFY_IRG(res, irg);
330 } /* new_bd_EndReg */
332 static ir_node *new_bd_EndExcept(dbg_info *db, ir_node *block)
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
338 set_irg_end_except(irg, res);
339 IRN_VRFY_IRG (res, irg);
341 } /* new_bd_EndExcept */
/* Creates an ASM node (inline assembler).  The constraint and clobber
   arrays are copied onto the graph's obstack, so callers may pass
   stack-allocated arrays.  The node is pinned. */
343 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
344 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
345 ir_asm_constraint *outputs, int n_clobber,
346 ident *clobber[], ident *asm_text)
349 ir_graph *irg = current_ir_graph;
351 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
352 res->attr.assem.pin_state = op_pin_state_pinned;
/* Allocate obstack-owned copies of the caller-supplied arrays ... */
353 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
354 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
355 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
356 res->attr.assem.asm_text = asm_text;
/* ... and fill them. */
358 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
359 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
360 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
367 /* --------------------------------------------- */
368 /* private interfaces, for professional use only */
369 /* --------------------------------------------- */
371 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
373 ir_graph *rem = current_ir_graph;
376 current_ir_graph = irg;
377 res = new_bd_Start(db, block);
378 current_ir_graph = rem;
383 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
386 ir_graph *rem = current_ir_graph;
388 current_ir_graph = irg;
389 res = new_bd_End(db, block);
390 current_ir_graph = rem;
395 /* Creates a Phi node with all predecessors. Calling this constructor
396 is only allowed if the corresponding block is mature. */
397 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
400 ir_graph *rem = current_ir_graph;
402 current_ir_graph = get_Block_irg(block);
403 res = new_bd_Phi(db, block,arity, in, mode);
404 current_ir_graph = rem;
409 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
412 ir_graph *rem = current_ir_graph;
414 current_ir_graph = irg;
415 res = new_bd_Const_type(db, con, tp);
416 current_ir_graph = rem;
419 } /* new_rd_Const_type */
/* Creates a Const in graph 'irg'.  Temporarily switches current_ir_graph
   because the bd-constructor reads it implicitly, then restores it. */
421 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
424 //#ifdef USE_ORIGINAL
425 ir_graph *rem = current_ir_graph;
427 current_ir_graph = irg;
428 res = new_bd_Const_type(db, con, firm_unknown_type);
429 current_ir_graph = rem;
/* Alternative (disabled) implementation delegating to new_rd_Const_type: */
431 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
437 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
439 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
440 } /* new_rd_Const_long */
442 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj)
445 ir_graph *rem = current_ir_graph;
447 current_ir_graph = get_Block_irg(block);
448 res = new_bd_defaultProj(db, block, arg, max_proj);
449 current_ir_graph = rem;
452 } /* new_rd_defaultProj */
454 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
455 ir_node *objptr, ir_entity *ent)
458 ir_graph *rem = current_ir_graph;
460 current_ir_graph = get_Block_irg(block);
461 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
462 current_ir_graph = rem;
465 } /* new_rd_simpleSel */
467 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
468 symconst_symbol value, symconst_kind symkind,
472 ir_graph *rem = current_ir_graph;
473 ir_node *block = get_irg_start_block(irg);
475 current_ir_graph = irg;
476 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
477 current_ir_graph = rem;
480 } /* new_rd_SymConst_type */
482 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
483 symconst_symbol value, symconst_kind symkind)
485 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
486 } /* new_rd_SymConst */
488 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
491 sym.entity_p = symbol;
492 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
493 } /* new_rd_SymConst_addr_ent */
495 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
498 sym.entity_p = symbol;
499 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
500 } /* new_rd_SymConst_ofs_ent */
502 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
506 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
507 } /* new_rd_SymConst_type_tag */
509 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
513 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
514 } /* new_rd_SymConst_size */
516 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
520 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
521 } /* new_rd_SymConst_align */
523 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
526 ir_graph *rem = current_ir_graph;
529 current_ir_graph = get_Block_irg(block);
530 res = new_bd_Sync(db, block);
531 current_ir_graph = rem;
533 for (i = 0; i < arity; ++i)
534 add_Sync_pred(res, in[i]);
539 ir_node *new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
543 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
544 set_irg_end_reg(irg, res);
545 IRN_VRFY_IRG(res, irg);
547 } /* new_rd_EndReg */
549 ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
553 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
554 set_irg_end_except(irg, res);
555 IRN_VRFY_IRG (res, irg);
557 } /* new_rd_EndExcept */
559 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
560 int arity, ir_node *in[], ir_asm_constraint *inputs,
561 int n_outs, ir_asm_constraint *outputs,
562 int n_clobber, ident *clobber[], ident *asm_text) {
564 ir_graph *rem = current_ir_graph;
566 current_ir_graph = get_Block_irg(block);
567 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
568 current_ir_graph = rem;
573 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
575 return new_rd_Start(NULL, irg, block);
577 ir_node *new_r_End(ir_graph *irg, ir_node *block)
579 return new_rd_End(NULL, irg, block);
581 ir_node *new_r_Const(ir_graph *irg, tarval *con)
583 return new_rd_Const(NULL, irg, con);
585 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
587 return new_rd_Const_long(NULL, irg, mode, value);
589 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
591 return new_rd_Const_type(NULL, irg, con, tp);
593 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode,
594 symconst_symbol value, symconst_kind symkind) {
595 return new_rd_SymConst(NULL, irg, mode, value, symkind);
597 ir_node *new_r_simpleSel(ir_node *block, ir_node *store,
598 ir_node *objptr, ir_entity *ent) {
599 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
601 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
603 return new_rd_Phi(NULL, block, arity, in, mode);
605 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
607 return new_rd_Sync(NULL, block, arity, in);
609 ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj)
611 return new_rd_defaultProj(NULL, block, arg, max_proj);
613 ir_node *new_r_Bad(ir_graph *irg)
615 return get_irg_bad(irg);
617 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
619 return new_rd_EndReg(NULL, irg, block);
621 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
623 return new_rd_EndExcept(NULL, irg, block);
625 ir_node *new_r_NoMem(ir_graph *irg)
627 return get_irg_no_mem(irg);
629 ir_node *new_r_ASM(ir_node *block,
630 int arity, ir_node *in[], ir_asm_constraint *inputs,
631 int n_outs, ir_asm_constraint *outputs,
632 int n_clobber, ident *clobber[], ident *asm_text) {
633 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
636 /** ********************/
637 /** public interfaces */
638 /** construction tools */
640 ir_node *new_d_Start(dbg_info *db)
644 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
645 op_Start, mode_T, 0, NULL);
647 res = optimize_node(res);
648 IRN_VRFY_IRG(res, current_ir_graph);
652 ir_node *new_d_End(dbg_info *db)
655 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
656 op_End, mode_X, -1, NULL);
657 res = optimize_node(res);
658 IRN_VRFY_IRG(res, current_ir_graph);
663 /* ***********************************************************************/
664 /* Methods necessary for automatic Phi node creation */
666 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
667 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
668 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
669 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
671 Call Graph: ( A ---> B == A "calls" B)
673 get_value mature_immBlock
681 get_r_value_internal |
685 new_rd_Phi0 new_rd_Phi_in
687 * *************************************************************************** */
689 /** Creates a Phi node with 0 predecessors. */
690 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
694 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
695 IRN_VRFY_IRG(res, irg);
701 * Internal constructor of a Phi node by a phi_merge operation.
703 * @param irg the graph on which the Phi will be constructed
704 * @param block the block in which the Phi will be constructed
705 * @param mode the mode of the Phi node
706 * @param in the input array of the phi node
707 * @param ins number of elements in the input array
708 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
709 * the value for which the new Phi is constructed
711 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
712 ir_mode *mode, ir_node **in, int ins,
716 ir_node *res, *known;
718 /* Allocate a new node on the obstack. The allocation copies the in
720 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
721 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
723 /* This loop checks whether the Phi has more than one predecessor.
724 If so, it is a real Phi node and we break the loop. Else the
725 Phi node merges the same definition on several paths and therefore
727 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
729 for (i = ins - 1; i >= 0; --i) {
732 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
734 /* Optimize self referencing Phis: We can't detect them yet properly, as
735 they still refer to the Phi0 they will replace. So replace right now. */
736 if (phi0 && in[i] == phi0)
739 if (in[i] == res || in[i] == known)
748 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The speculatively allocated node is unused — undo edge bookkeeping and
   free it from the obstack again. */
751 edges_node_deleted(res, current_ir_graph);
752 obstack_free(current_ir_graph->obst, res);
754 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
755 order, an enclosing Phi node may become superfluous. */
756 res = optimize_in_place_2(known);
758 exchange(known, res);
763 /* An undefined value, e.g., in unreachable code. */
767 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
768 IRN_VRFY_IRG(res, irg);
769 /* Memory Phis in endless loops must be kept alive.
770 As we can't distinguish these easily we keep all of them alive. */
771 if (is_Phi(res) && mode == mode_M)
772 add_End_keepalive(get_irg_end(irg), res);
776 } /* new_rd_Phi_in */
778 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
780 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
783 * Construct a new frag_array for node n.
784 * Copy the content from the current graph_arr of the corresponding block:
785 * this is the current state.
786 * Set ProjM(n) as current memory state.
787 * Further the last entry in frag_arr of current block points to n. This
788 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
790 static inline ir_node **new_frag_arr(ir_node *n)
795 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
796 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
797 sizeof(ir_node *)*current_ir_graph->n_loc);
799 /* turn off optimization before allocating Proj nodes, as res isn't
801 opt = get_opt_optimize(); set_optimize(0);
802 /* Here we rely on the fact that all frag ops have Memory as first result! */
804 arr[0] = new_Proj(n, mode_M, pn_Call_M);
805 } else if (is_CopyB(n)) {
806 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
/* All remaining fragile ops share one memory Proj number; assert that the
   assumption holds so pn_Alloc_M can stand in for any of them below. */
808 assert((pn_Quot_M == pn_DivMod_M) &&
809 (pn_Quot_M == pn_Div_M) &&
810 (pn_Quot_M == pn_Mod_M) &&
811 (pn_Quot_M == pn_Load_M) &&
812 (pn_Quot_M == pn_Store_M) &&
813 (pn_Quot_M == pn_Alloc_M) &&
814 (pn_Quot_M == pn_Bound_M));
815 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the block's last fragile op, extending the chain. */
819 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
824 * Returns the frag_arr from a node.
826 static inline ir_node **get_frag_arr(ir_node *n)
828 switch (get_irn_opcode(n)) {
830 return n->attr.call.exc.frag_arr;
832 return n->attr.alloc.exc.frag_arr;
834 return n->attr.load.exc.frag_arr;
836 return n->attr.store.exc.frag_arr;
838 return n->attr.except.frag_arr;
/* Writes value 'val' at position 'pos' into frag_arr and all frag arrays
   chained behind it, without overwriting already-set entries.  The loop is
   bounded (1024 iterations) purely as a guard against a cyclic chain;
   running off the bound trips the assert below. */
842 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
847 for (i = 1024; i >= 0; --i)
852 if (frag_arr[pos] == NULL)
/* Follow the chain: the last slot points to the next fragile op. */
854 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
855 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
856 assert(arr != frag_arr && "Endless recursion detected");
861 assert(!"potential endless recursion in set_frag_value");
862 } /* set_frag_value */
864 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
865 int pos, ir_mode *mode)
870 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
872 frag_arr = get_frag_arr(cfOp);
875 if (block->attr.block.graph_arr[pos] != NULL) {
876 /* There was a set_value() after the cfOp and no get_value() before that
877 set_value(). We must build a Phi node now. */
878 if (block->attr.block.is_matured) {
879 int ins = get_irn_arity(block);
881 NEW_ARR_A(ir_node *, nin, ins);
882 res = phi_merge(block, pos, mode, nin, ins);
884 res = new_rd_Phi0(current_ir_graph, block, mode);
885 res->attr.phi.u.pos = pos;
886 res->attr.phi.next = block->attr.block.phis;
887 block->attr.block.phis = res;
890 /* It's a Phi, we can write this into all graph_arrs with NULL */
891 set_frag_value(block->attr.block.graph_arr, pos, res);
893 res = get_r_value_internal(block, pos, mode);
894 set_frag_value(block->attr.block.graph_arr, pos, res);
898 } /* get_r_frag_value_internal */
901 * Check whether a control flow node cf_pred represents an exception flow.
903 * @param cf_pred the control flow node
904 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
906 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
909 * Note: all projections from a Raise are "exceptional control flow", but we handle it
910 * like a normal Jmp, because there is no "regular" one.
911 * That's why Raise is no "fragile_op"!
913 if (is_fragile_op(prev_cf_op)) {
914 if (is_Proj(cf_pred)) {
915 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
916 /* the regular control flow, NO exception */
/* Only two projections exist on a fragile op: regular and except. */
919 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
922 /* Hmm, exception but not a Proj? */
923 assert(!"unexpected condition: fragile op without a proj");
927 } /* is_exception_flow */
930 * Computes the predecessors for the real phi node, and then
931 * allocates and returns this node. The routine called to allocate the
932 * node might optimize it away and return a real value.
933 * This function must be called with an in-array of proper size.
935 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
937 ir_node *prevBlock, *res, *phi0, *phi0_all;
940 /* If this block has no value at pos create a Phi0 and remember it
941 in graph_arr to break recursions.
942 Else we may not set graph_arr as there a later value is remembered. */
944 if (block->attr.block.graph_arr[pos] == NULL) {
945 ir_graph *irg = current_ir_graph;
947 if (block == get_irg_start_block(irg)) {
948 /* Collapsing to Bad tarvals is no good idea.
949 So we call a user-supplied routine here that deals with this case as
950 appropriate for the given language. Sorrily the only help we can give
951 here is the position.
953 Even if all variables are defined before use, it can happen that
954 we get to the start block, if a Cond has been replaced by a tuple
955 (bad, jmp). In this case we call the function needlessly, eventually
956 generating a non-existent error.
957 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
960 if (default_initialize_local_variable != NULL) {
961 ir_node *rem = get_cur_block();
963 set_cur_block(block);
964 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
968 block->attr.block.graph_arr[pos] = new_Unknown(mode);
969 /* We don't need to care about exception ops in the start block.
970 There are none by definition. */
971 return block->attr.block.graph_arr[pos];
973 phi0 = new_rd_Phi0(irg, block, mode);
974 block->attr.block.graph_arr[pos] = phi0;
975 if (get_opt_precise_exc_context()) {
976 /* Set graph_arr for fragile ops. Also here we should break recursion.
977 We could choose a cyclic path through an cfop. But the recursion would
978 break at some point. */
979 set_frag_value(block->attr.block.graph_arr, pos, phi0);
984 /* This loop goes to all predecessor blocks of the block the Phi node
985 is in and there finds the operands of the Phi node by calling
986 get_r_value_internal. */
987 for (i = 1; i <= ins; ++i) {
988 ir_node *cf_pred = block->in[i];
989 ir_node *prevCfOp = skip_Proj(cf_pred);
991 if (is_Bad(prevCfOp)) {
992 /* In case a Cond has been optimized we would get right to the start block
993 with an invalid definition. */
994 nin[i-1] = new_Bad();
997 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
999 if (!is_Bad(prevBlock)) {
1000 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
1001 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
1002 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
1004 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
1006 nin[i-1] = new_Bad();
1010 /* We want to pass the Phi0 node to the constructor: this finds additional
1011 optimization possibilities.
1012 The Phi0 node either is allocated in this function, or it comes from
1013 a former call to get_r_value_internal(). In this case we may not yet
1014 exchange phi0, as this is done in mature_immBlock(). */
1016 phi0_all = block->attr.block.graph_arr[pos];
1017 if (! is_Phi0(phi0_all) ||
1018 get_irn_arity(phi0_all) != 0 ||
1019 get_nodes_block(phi0_all) != block)
1025 /* After collecting all predecessors into the array nin a new Phi node
1026 with these predecessors is created. This constructor contains an
1027 optimization: If all predecessors of the Phi node are identical it
1028 returns the only operand instead of a new Phi node. */
1029 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1031 /* In case we allocated a Phi0 node at the beginning of this procedure,
1032 we need to exchange this Phi0 with the real Phi. */
1034 exchange(phi0, res);
1035 block->attr.block.graph_arr[pos] = res;
1036 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1037 only an optimization. */
1044 * This function returns the last definition of a value. In case
1045 * this value was last defined in a previous block, Phi nodes are
1046 * inserted. If the part of the firm graph containing the definition
1047 * is not yet constructed, a dummy Phi node is returned.
1049 * @param block the current block
1050 * @param pos the value number of the value searched
1051 * @param mode the mode of this value (needed for Phi construction)
1053 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1056 /* There are 4 cases to treat.
1058 1. The block is not mature and we visit it the first time. We can not
1059 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1060 predecessors is returned. This node is added to the linked list (block
1061 attribute "phis") of the containing block to be completed when this block is
1062 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1065 2. The value is already known in this block, graph_arr[pos] is set and we
1066 visit the block the first time. We can return the value without
1067 creating any new nodes.
1069 3. The block is mature and we visit it the first time. A Phi node needs
1070 to be created (phi_merge). If the Phi is not needed, as all its
1071 operands are the same value reaching the block through different
1072 paths, it's optimized away and the value itself is returned.
1074 4. The block is mature, and we visit it the second time. Now two
1075 subcases are possible:
1076 * The value was computed completely the last time we were here. This
1077 is the case if there is no loop. We can return the proper value.
1078 * The recursion that visited this node and set the flag did not
1079 return yet. We are computing a value in a loop and need to
1080 break the recursion. This case only happens if we visited
1081 the same block with phi_merge before, which inserted a Phi0.
1082 So we return the Phi0.
1085 /* case 4 -- already visited. */
1086 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1087 /* As phi_merge allocates a Phi0 this value is always defined. Here
1088 is the critical difference of the two algorithms. */
1089 assert(block->attr.block.graph_arr[pos]);
1090 return block->attr.block.graph_arr[pos];
1093 /* visited the first time */
1094 set_irn_visited(block, get_irg_visited(current_ir_graph));
1096 /* Get the local valid value */
1097 res = block->attr.block.graph_arr[pos];
1099 /* case 2 -- If the value is actually computed, return it. */
1103 if (block->attr.block.is_matured) { /* case 3 */
1105 /* The Phi has the same amount of ins as the corresponding block. */
1106 int ins = get_irn_arity(block);
1108 NEW_ARR_A(ir_node *, nin, ins);
1110 /* Phi merge collects the predecessors and then creates a node. */
1111 res = phi_merge(block, pos, mode, nin, ins);
1113 } else { /* case 1 */
1114 /* The block is not mature, we don't know how many in's are needed. A Phi
1115 with zero predecessors is created. Such a Phi node is called Phi0
1116 node. The Phi0 is then added to the list of Phi0 nodes in this block
1117 to be matured by mature_immBlock later.
1118 The Phi0 has to remember the pos of its internal value. If the real
1119 Phi is computed, pos is used to update the array with the local
1121 res = new_rd_Phi0(current_ir_graph, block, mode);
1122 res->attr.phi.u.pos = pos;
1123 res->attr.phi.next = block->attr.block.phis;
1124 block->attr.block.phis = res;
1127 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1129 /* The local valid value is available now. */
1130 block->attr.block.graph_arr[pos] = res;
1133 } /* get_r_value_internal */
1135 /* ************************************************************************** */
1138 * Finalize a Block node, when all control flows are known.
1139 * Acceptable parameters are only Block nodes.
1141 void mature_immBlock(ir_node *block)
1147 assert(is_Block(block));
/* Maturing is idempotent: already-matured blocks are left untouched. */
1148 if (!get_Block_matured(block)) {
1149 ir_graph *irg = current_ir_graph;
1151 ins = ARR_LEN(block->in) - 1;
1152 /* Fix block parameters */
1153 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1155 /* An array for building the Phi nodes. */
1156 NEW_ARR_A(ir_node *, nin, ins);
1158 /* Traverse a chain of Phi nodes attached to this block and mature
/* 'next' must be read before exchange() invalidates n. */
1160 for (n = block->attr.block.phis; n; n = next) {
1161 inc_irg_visited(irg);
1162 next = n->attr.phi.next;
1163 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1166 block->attr.block.is_matured = 1;
1168 /* Now, as the block is a finished Firm node, we can optimize it.
1169 Since other nodes have been allocated since the block was created
1170 we can not free the node on the obstack. Therefore we have to call
1171 optimize_in_place().
1172 Unfortunately the optimization does not change a lot, as all allocated
1173 nodes refer to the unoptimized node.
1174 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1175 block = optimize_in_place_2(block);
1176 IRN_VRFY_IRG(block, irg);
1178 } /* mature_immBlock */
1180 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1182 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1185 ir_node *new_d_Const(dbg_info *db, tarval *con)
1187 return new_bd_Const(db, con);
1190 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1192 return new_bd_Const_long(db, mode, value);
1193 } /* new_d_Const_long */
1195 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1197 return new_bd_Const_type(db, con, tp);
1198 } /* new_d_Const_type */
1201 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1204 assert(arg->op == op_Cond);
1205 arg->attr.cond.default_proj = max_proj;
1206 res = new_d_Proj(db, arg, mode_X, max_proj);
1208 } /* new_d_defaultProj */
1211 * Allocate a frag array for a node if the current graph state is phase_building.
1213 * @param irn the node for which the frag array should be allocated
1214 * @param op the opcode of the (original) node, if does not match opcode of irn,
1216 * @param frag_store the address of the frag store in irn attributes, if this
1217 * address contains a value != NULL, does nothing
1219 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1221 if (get_opt_precise_exc_context()) {
1222 if ((current_ir_graph->phase_state == phase_building) &&
1223 (get_irn_op(irn) == op) && /* Could be optimized away. */
1224 !*frag_store) /* Could be a cse where the arr is already set. */ {
1225 *frag_store = new_frag_arr(irn);
1228 } /* firm_alloc_frag_arr */
1230 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1231 /* GL: objptr was called frame before. Frame was a bad choice for the name
1232 as the operand could as well be a pointer to a dynamic object. */
1234 return new_bd_Sel(db, current_ir_graph->current_block,
1235 store, objptr, 0, NULL, ent);
1236 } /* new_d_simpleSel */
1238 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1240 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1242 } /* new_d_SymConst_type */
1244 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1246 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1247 value, kind, firm_unknown_type);
1248 } /* new_d_SymConst */
1250 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1252 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1255 ir_node *new_d_EndReg(dbg_info *db)
1257 return new_bd_EndReg(db, current_ir_graph->current_block);
1258 } /* new_d_EndReg */
1260 ir_node *new_d_EndExcept(dbg_info *db)
1262 return new_bd_EndExcept(db, current_ir_graph->current_block);
1263 } /* new_d_EndExcept */
1266 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1267 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1268 ident *clobber[], ident *asm_text)
1270 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1273 /* ********************************************************************* */
1274 /* Comfortable interface with automatic Phi node construction. */
1275 /* (Uses also constructors of ?? interface, except new_Block. */
1276 /* ********************************************************************* */
1278 /* Block construction */
1279 /* immature Block without predecessors */
1280 ir_node *new_d_immBlock(dbg_info *db)
1284 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1285 /* creates a new dynamic in-array as length of in is -1 */
1286 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1288 /* macroblock head */
1291 res->attr.block.is_matured = 0;
1292 res->attr.block.is_dead = 0;
1293 res->attr.block.is_mb_head = 1;
1294 res->attr.block.irg.irg = current_ir_graph;
1295 res->attr.block.backedge = NULL;
1296 res->attr.block.in_cg = NULL;
1297 res->attr.block.cg_backedge = NULL;
1298 res->attr.block.extblk = NULL;
1299 res->attr.block.region = NULL;
1300 res->attr.block.mb_depth = 0;
1301 res->attr.block.entity = NULL;
1303 set_Block_block_visited(res, 0);
1305 /* Create and initialize array for Phi-node construction. */
1306 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1307 current_ir_graph->n_loc);
1308 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1310 /* Immature block may not be optimized! */
1311 IRN_VRFY_IRG(res, current_ir_graph);
1314 } /* new_d_immBlock */
1316 ir_node *new_immBlock(void)
1318 return new_d_immBlock(NULL);
1319 } /* new_immBlock */
1321 /* immature PartBlock with its predecessors */
1322 ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
1324 ir_node *res = new_d_immBlock(db);
1325 ir_node *blk = get_nodes_block(pred_jmp);
1327 res->in[0] = blk->in[0];
1328 assert(res->in[0] != NULL);
1329 add_immBlock_pred(res, pred_jmp);
1331 res->attr.block.is_mb_head = 0;
1332 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1335 } /* new_d_immPartBlock */
1337 ir_node *new_immPartBlock(ir_node *pred_jmp)
1339 return new_d_immPartBlock(NULL, pred_jmp);
1340 } /* new_immPartBlock */
1342 /* add an edge to a jmp/control flow node */
1343 void add_immBlock_pred(ir_node *block, ir_node *jmp)
1345 int n = ARR_LEN(block->in) - 1;
1347 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1348 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1349 assert(is_ir_node(jmp));
1351 ARR_APP1(ir_node *, block->in, jmp);
1353 hook_set_irn_n(block, n, jmp, NULL);
1354 } /* add_immBlock_pred */
1356 /* changing the current block */
1357 void set_cur_block(ir_node *target)
1359 current_ir_graph->current_block = target;
1360 } /* set_cur_block */
1362 /* ************************ */
1363 /* parameter administration */
1365 /* get a value from the parameter array from the current block by its index */
1366 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
1368 ir_graph *irg = current_ir_graph;
1369 assert(get_irg_phase_state(irg) == phase_building);
1370 inc_irg_visited(irg);
1375 return get_r_value_internal(irg->current_block, pos + 1, mode);
1378 /* get a value from the parameter array from the current block by its index */
1379 ir_node *get_value(int pos, ir_mode *mode)
1381 return get_d_value(NULL, pos, mode);
1384 /* set a value at position pos in the parameter array from the current block */
1385 void set_value(int pos, ir_node *value)
1387 ir_graph *irg = current_ir_graph;
1388 assert(get_irg_phase_state(irg) == phase_building);
1390 assert(pos+1 < irg->n_loc);
1391 assert(is_ir_node(value));
1392 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1395 /* Find the value number for a node in the current block.*/
1396 int find_value(ir_node *value)
1399 ir_node *bl = current_ir_graph->current_block;
1401 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1402 if (bl->attr.block.graph_arr[i] == value)
1407 /* get the current store */
1408 ir_node *get_store(void)
1410 ir_graph *irg = current_ir_graph;
1412 assert(get_irg_phase_state(irg) == phase_building);
1413 /* GL: one could call get_value instead */
1414 inc_irg_visited(irg);
1415 return get_r_value_internal(irg->current_block, 0, mode_M);
1418 /* set the current store: handles automatic Sync construction for Load nodes */
1419 void set_store(ir_node *store)
1421 ir_node *load, *pload, *pred, *in[2];
1423 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1424 /* Beware: due to dead code elimination, a store might become a Bad node even in
1425 the construction phase. */
1426 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1428 if (get_opt_auto_create_sync()) {
1429 /* handle non-volatile Load nodes by automatically creating Sync's */
1430 load = skip_Proj(store);
1431 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1432 pred = get_Load_mem(load);
1434 if (is_Sync(pred)) {
1435 /* a Load after a Sync: move it up */
1436 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1438 set_Load_mem(load, get_memop_mem(mem));
1439 add_Sync_pred(pred, store);
1442 pload = skip_Proj(pred);
1443 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1444 /* a Load after a Load: create a new Sync */
1445 set_Load_mem(load, get_Load_mem(pload));
1449 store = new_Sync(2, in);
1454 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1457 void keep_alive(ir_node *ka)
1459 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1462 /* --- Useful access routines --- */
1463 /* Returns the current block of the current graph. To set the current
1464 block use set_cur_block. */
1465 ir_node *get_cur_block(void)
1467 return get_irg_current_block(current_ir_graph);
1468 } /* get_cur_block */
1470 /* Returns the frame type of the current graph */
1471 ir_type *get_cur_frame_type(void)
1473 return get_irg_frame_type(current_ir_graph);
1474 } /* get_cur_frame_type */
1477 /* ********************************************************************* */
1480 /* call once for each run of the library */
1481 void firm_init_cons(uninitialized_local_variable_func_t *func)
1483 default_initialize_local_variable = func;
1484 } /* firm_init_cons */
1486 void irp_finalize_cons(void)
1489 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1490 irg_finalize_cons(get_irp_irg(i));
1492 irp->phase_state = phase_high;
1493 } /* irp_finalize_cons */
1495 ir_node *new_Start(void)
1497 return new_d_Start(NULL);
1499 ir_node *new_End(void)
1501 return new_d_End(NULL);
1503 ir_node *new_Const(tarval *con)
1505 return new_d_Const(NULL, con);
1508 ir_node *new_Const_long(ir_mode *mode, long value)
1510 return new_d_Const_long(NULL, mode, value);
1513 ir_node *new_Const_type(tarval *con, ir_type *tp)
1515 return new_d_Const_type(NULL, con, tp);
1518 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1520 return new_d_SymConst_type(NULL, mode, value, kind, type);
1522 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1524 return new_d_SymConst(NULL, mode, value, kind);
1526 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1528 return new_d_simpleSel(NULL, store, objptr, ent);
1530 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1532 return new_d_Phi(NULL, arity, in, mode);
1534 ir_node *new_Sync(int arity, ir_node *in[])
1536 return new_d_Sync(NULL, arity, in);
1538 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1540 return new_d_defaultProj(NULL, arg, max_proj);
1542 ir_node *new_Bad(void)
1544 return get_irg_bad(current_ir_graph);
1546 ir_node *new_EndReg(void)
1548 return new_d_EndReg(NULL);
1550 ir_node *new_EndExcept(void)
1552 return new_d_EndExcept(NULL);
1554 ir_node *new_NoMem(void)
1556 return get_irg_no_mem(current_ir_graph);
1558 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1559 int n_outs, ir_asm_constraint *outputs,
1560 int n_clobber, ident *clobber[], ident *asm_text) {
1561 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1564 /* create a new anchor node */
1565 ir_node *new_Anchor(ir_graph *irg)
1567 ir_node *in[anchor_last];
1568 memset(in, 0, sizeof(in));
1569 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);