2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
/* NOTE(review): this excerpt elides lines -- the #ifdef/#else that selects
   between the two IRN_VRFY_IRG definitions below is not visible here.
   Verify against the full file before editing. */
47 /* when we need verifying */
/* Disabled variant: verification compiled out (empty expansion). */
49 # define IRN_VRFY_IRG(res, irg)
/* Enabled variant: verify node 'res' against graph 'irg'. */
51 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
55 * Language dependent variable initialization callback.
/* Frontend-supplied hook; phi_merge() calls it for a read of an uninitialized
   local reaching the start block. NULL means "build an Unknown node" instead
   (see the start-block branch of phi_merge below). */
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
59 /* creates a bd constructor for a binop */
/* NOTE(review): several macro continuation lines (function header, 'in'
   array setup, return) are elided from this excerpt -- do not edit these
   macros from this view alone.
   Expands to new_bd_<instr>(): builds a 2-input node on current_ir_graph,
   runs optimize_node() and verification. */
60 #define NEW_BD_BINOP(instr) \
62 new_bd_##instr(dbg_info *db, ir_node *block, \
63 ir_node *op1, ir_node *op2, ir_mode *mode) \
67 ir_graph *irg = current_ir_graph; \
70 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
71 res = optimize_node(res); \
72 IRN_VRFY_IRG(res, irg); \
76 /* creates a bd constructor for an unop */
/* Expands to new_bd_<instr>(): 1-input node, optimized and verified. */
77 #define NEW_BD_UNOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op, ir_mode *mode) \
83 ir_graph *irg = current_ir_graph; \
84 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
85 res = optimize_node(res); \
86 IRN_VRFY_IRG(res, irg); \
90 /* creates a bd constructor for an divop */
/* Expands to new_bd_<instr>(): 3-input (mem, op1, op2) mode_T node; stores
   the pin state and result mode in the divmod attribute before optimizing. */
91 #define NEW_BD_DIVOP(instr) \
93 new_bd_##instr(dbg_info *db, ir_node *block, \
94 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
98 ir_graph *irg = current_ir_graph; \
102 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
103 res->attr.divmod.exc.pin_state = state; \
104 res->attr.divmod.resmode = mode; \
105 res->attr.divmod.no_remainder = 0; \
106 res = optimize_node(res); \
107 IRN_VRFY_IRG(res, irg); \
111 /* creates a rd constructor for a binop */
/* NOTE(review): macro continuation lines are elided in this excerpt.
   The rd variants temporarily swap current_ir_graph to 'irg', delegate to
   the matching bd constructor, then restore the previous graph. */
112 #define NEW_RD_BINOP(instr) \
114 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
115 ir_node *op1, ir_node *op2, ir_mode *mode) \
118 ir_graph *rem = current_ir_graph; \
119 current_ir_graph = irg; \
120 res = new_bd_##instr(db, block, op1, op2, mode); \
121 current_ir_graph = rem; \
125 /* creates a rd constructor for an unop */
126 #define NEW_RD_UNOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op, mode); \
135 current_ir_graph = rem; \
139 /* creates a rd constructor for an divop */
140 #define NEW_RD_DIVOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
149 current_ir_graph = rem; \
153 /* creates a d constructor for an binop */
/* The d variants use current_ir_graph->current_block implicitly. */
154 #define NEW_D_BINOP(instr) \
156 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
157 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
160 /* creates a d constructor for an unop */
161 #define NEW_D_UNOP(instr) \
163 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
164 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
167 #include "gen_ir_cons.c.inl"
/* Builds a Start node (mode_T, no predecessors) on current_ir_graph.
   NOTE(review): this excerpt elides lines (declarations/returns) -- verify
   against the full file. */
169 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
172 ir_graph *irg = current_ir_graph;
174 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
176 IRN_VRFY_IRG(res, irg);
/* Builds an End node (mode_X, dynamic arity: -1 in the constructor call). */
180 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
183 ir_graph *irg = current_ir_graph;
185 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
187 IRN_VRFY_IRG(res, irg);
192 * Creates a Phi node with all predecessors. Calling this constructor
193 * is only allowed if the corresponding block is mature.
/* Skips optimize_node() when any input is Unknown; memory Phis are added to
   the End keep-alives because endless loops would otherwise lose them. */
195 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
198 ir_graph *irg = current_ir_graph;
202 /* Don't assert that block matured: the use of this constructor is strongly
204 if (get_Block_matured(block))
205 assert(get_irn_arity(block) == arity);
207 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
209 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
211 for (i = arity - 1; i >= 0; --i)
212 if (is_Unknown(in[i])) {
217 if (!has_unknown) res = optimize_node(res);
218 IRN_VRFY_IRG(res, irg);
220 /* Memory Phis in endless loops must be kept alive.
221 As we can't distinguish these easily we keep all of them alive. */
222 if (is_Phi(res) && mode == mode_M)
223 add_End_keepalive(get_irg_end(irg), res);
/* Builds a Const in the start block; mode is taken from the tarval.
   NOTE(review): excerpt elides lines (declarations/returns). */
227 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
230 ir_graph *irg = current_ir_graph;
232 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
233 res->attr.con.tarval = con;
234 set_Const_type(res, tp); /* Call method because of complex assertion. */
235 res = optimize_node (res);
236 assert(get_Const_type(res) == tp);
237 IRN_VRFY_IRG(res, irg);
240 } /* new_bd_Const_type */
/* Convenience wrapper: Const with firm_unknown_type. */
242 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
244 ir_graph *irg = current_ir_graph;
246 return new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Convenience wrapper: Const from a C 'long' converted to a tarval. */
249 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
251 ir_graph *irg = current_ir_graph;
253 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
254 } /* new_bd_Const_long */
/* Creates the default Proj of a Cond; also records max_proj in the Cond's
   attribute (side effect on 'arg'). */
256 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
260 assert(arg->op == op_Cond);
261 arg->attr.cond.default_proj = max_proj;
262 res = new_rd_Proj(db, arg, mode_X, max_proj);
264 } /* new_bd_defaultProj */
/* Builds a Sel node: ins are [store, objptr, in...]; result mode is
   mode_P_code for method entities, mode_P_data otherwise.
   NOTE(review): excerpt elides lines (r_arity setup, r_in[0..1] assignment,
   return) -- verify against the full file. */
266 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *objptr, int arity, ir_node **in,
273 ir_graph *irg = current_ir_graph;
274 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
282 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 * Sel's can select functions which should be of mode mode_P_code.
286 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 res->attr.sel.entity = ent;
288 res = optimize_node(res);
289 IRN_VRFY_IRG(res, irg);
/* Builds a SymConst with explicit kind, symbol payload and type attribute. */
293 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
294 ir_mode *mode, symconst_symbol value,
295 symconst_kind symkind, ir_type *tp)
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
/* Builds an empty Sync (mode_M, dynamic arity); predecessors are added later
   by the caller, hence no optimize_node() here. */
309 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
/* Builds an EndReg node and registers it as the graph's end_reg.
   NOTE(review): excerpt elides lines (declarations/returns). */
321 static ir_node *new_bd_EndReg(dbg_info *db, ir_node *block)
324 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
327 set_irg_end_reg(irg, res);
328 IRN_VRFY_IRG(res, irg);
330 } /* new_bd_EndReg */
/* Builds an EndExcept node and registers it as the graph's end_except. */
332 static ir_node *new_bd_EndExcept(dbg_info *db, ir_node *block)
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
338 set_irg_end_except(irg, res);
339 IRN_VRFY_IRG (res, irg);
341 } /* new_bd_EndExcept */
/* Builds a pinned ASM node; constraint and clobber arrays are copied onto
   the graph's obstack so the caller's arrays need not outlive the node. */
343 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
344 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
345 ir_asm_constraint *outputs, int n_clobber,
346 ident *clobber[], ident *text)
349 ir_graph *irg = current_ir_graph;
351 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
352 res->attr.assem.pin_state = op_pin_state_pinned;
353 res->attr.assem.input_constraints
354 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
355 res->attr.assem.output_constraints
356 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
357 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
358 res->attr.assem.text = text;
360 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
361 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
362 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
369 /* --------------------------------------------- */
370 /* private interfaces, for professional use only */
371 /* --------------------------------------------- */
/* rd-level constructors: each temporarily switches current_ir_graph to the
   given irg (or the block's irg), delegates to the bd constructor, then
   restores the previous graph.
   NOTE(review): excerpt elides lines (declarations/returns) -- verify
   against the full file. */
373 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
375 ir_graph *rem = current_ir_graph;
378 current_ir_graph = irg;
379 res = new_bd_Start(db, block);
380 current_ir_graph = rem;
385 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
388 ir_graph *rem = current_ir_graph;
390 current_ir_graph = irg;
391 res = new_bd_End(db, block);
392 current_ir_graph = rem;
397 /* Creates a Phi node with all predecessors. Calling this constructor
398 is only allowed if the corresponding block is mature. */
/* Note: derives the graph from 'block' via get_Block_irg(). */
399 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
402 ir_graph *rem = current_ir_graph;
404 current_ir_graph = get_Block_irg(block);
405 res = new_bd_Phi(db, block,arity, in, mode);
406 current_ir_graph = rem;
411 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
414 ir_graph *rem = current_ir_graph;
416 current_ir_graph = irg;
417 res = new_bd_Const_type(db, con, tp);
418 current_ir_graph = rem;
421 } /* new_rd_Const_type */
/* Inlines new_rd_Const_type(..., firm_unknown_type); the commented-out line
   below is the older delegating form kept for reference. */
423 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
426 //#ifdef USE_ORIGINAL
427 ir_graph *rem = current_ir_graph;
429 current_ir_graph = irg;
430 res = new_bd_Const_type(db, con, firm_unknown_type);
431 current_ir_graph = rem;
433 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
439 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
441 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
442 } /* new_rd_Const_long */
444 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
446 return new_bd_defaultProj(db, arg, max_proj);
447 } /* new_rd_defaultProj */
/* Sel without explicit index inputs (arity 0, in == NULL).
   NOTE(review): excerpt elides lines (declarations/returns). */
449 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
450 ir_node *objptr, ir_entity *ent)
453 ir_graph *rem = current_ir_graph;
455 current_ir_graph = get_Block_irg(block);
456 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
457 current_ir_graph = rem;
460 } /* new_rd_simpleSel */
/* SymConsts are always placed in the start block of 'irg'. */
462 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
463 symconst_symbol value, symconst_kind symkind,
467 ir_graph *rem = current_ir_graph;
468 ir_node *block = get_irg_start_block(irg);
470 current_ir_graph = irg;
471 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
472 current_ir_graph = rem;
475 } /* new_rd_SymConst_type */
477 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
478 symconst_symbol value, symconst_kind symkind)
480 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
481 } /* new_rd_SymConst */
/* The following helpers wrap a payload into a symconst_symbol union and
   delegate with the matching symconst_kind. */
483 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
486 sym.entity_p = symbol;
487 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
488 } /* new_rd_SymConst_addr_ent */
490 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
493 sym.entity_p = symbol;
494 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
495 } /* new_rd_SymConst_ofs_ent */
497 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
501 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
502 } /* new_rd_SymConst_type_tag */
504 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
508 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
509 } /* new_rd_SymConst_size */
511 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
515 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
516 } /* new_rd_SymConst_align */
/* Builds an empty Sync, then appends the 'arity' predecessors one by one. */
518 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
521 ir_graph *rem = current_ir_graph;
524 current_ir_graph = get_Block_irg(block);
525 res = new_bd_Sync(db, block);
526 current_ir_graph = rem;
528 for (i = 0; i < arity; ++i)
529 add_Sync_pred(res, in[i]);
/* Unlike most rd constructors these build the node directly (no bd
   delegation, no current_ir_graph swap) and register it on the graph.
   NOTE(review): excerpt elides lines (declarations/returns). */
534 ir_node *new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
538 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
539 set_irg_end_reg(irg, res);
540 IRN_VRFY_IRG(res, irg);
542 } /* new_rd_EndReg */
544 ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
548 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
549 set_irg_end_except(irg, res);
550 IRN_VRFY_IRG (res, irg);
552 } /* new_rd_EndExcept */
/* ASM: graph derived from the block; delegates to new_bd_ASM. */
554 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
555 int arity, ir_node *in[], ir_asm_constraint *inputs,
556 int n_outs, ir_asm_constraint *outputs,
557 int n_clobber, ident *clobber[], ident *text)
560 ir_graph *rem = current_ir_graph;
562 current_ir_graph = get_Block_irg(block);
563 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
564 current_ir_graph = rem;
/* r-level constructors: thin wrappers that call the rd variants with a NULL
   dbg_info. NOTE(review): closing braces are elided in this excerpt. */
569 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
571 return new_rd_Start(NULL, irg, block);
573 ir_node *new_r_End(ir_graph *irg, ir_node *block)
575 return new_rd_End(NULL, irg, block);
577 ir_node *new_r_Const(ir_graph *irg, tarval *con)
579 return new_rd_Const(NULL, irg, con);
581 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
583 return new_rd_Const_long(NULL, irg, mode, value);
585 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
587 return new_rd_Const_type(NULL, irg, con, tp);
589 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
590 symconst_kind symkind)
592 return new_rd_SymConst(NULL, irg, mode, value, symkind);
594 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
597 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
599 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
601 return new_rd_Phi(NULL, block, arity, in, mode);
603 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
605 return new_rd_Sync(NULL, block, arity, in);
607 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
609 return new_rd_defaultProj(NULL, arg, max_proj);
/* Bad and NoMem return the graph's singleton nodes rather than allocating. */
611 ir_node *new_r_Bad(ir_graph *irg)
613 return get_irg_bad(irg);
615 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
617 return new_rd_EndReg(NULL, irg, block);
619 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
621 return new_rd_EndExcept(NULL, irg, block);
623 ir_node *new_r_NoMem(ir_graph *irg)
625 return get_irg_no_mem(irg);
627 ir_node *new_r_ASM(ir_node *block,
628 int arity, ir_node *in[], ir_asm_constraint *inputs,
629 int n_outs, ir_asm_constraint *outputs,
630 int n_clobber, ident *clobber[], ident *text)
632 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
635 /** ********************/
636 /** public interfaces */
637 /** construction tools */
/* d-level constructors: build directly in current_ir_graph's current block.
   NOTE(review): excerpt elides lines (declarations/returns). */
639 ir_node *new_d_Start(dbg_info *db)
643 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
644 op_Start, mode_T, 0, NULL);
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, current_ir_graph);
651 ir_node *new_d_End(dbg_info *db)
654 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
655 op_End, mode_X, -1, NULL);
656 res = optimize_node(res);
657 IRN_VRFY_IRG(res, current_ir_graph);
662 /* ***********************************************************************/
663 /* Methods necessary for automatic Phi node creation */
665 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
666 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
667 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
668 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
670 Call Graph: ( A ---> B == A "calls" B)
672 get_value mature_immBlock
680 get_r_value_internal |
684 new_rd_Phi0 new_rd_Phi_in
686 * *************************************************************************** */
688 /** Creates a Phi node with 0 predecessors. */
/* Placeholder Phi used to break recursion during SSA construction; it is
   later exchanged for a real Phi (or an Id) by mature_immBlock()/phi_merge().
   NOTE(review): declaration and return lines are elided in this excerpt. */
689 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
693 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
694 IRN_VRFY_IRG(res, irg);
700 * Internal constructor of a Phi node by a phi_merge operation.
702 * @param irg the graph on which the Phi will be constructed
703 * @param block the block in which the Phi will be constructed
704 * @param mode the mode of the Phi node
705 * @param in the input array of the phi node
706 * @param ins number of elements in the input array
707 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
708 * the value for which the new Phi is constructed
/* NOTE(review): this excerpt elides many lines of this function (loop body,
   branch structure, several returns) -- do not edit from this view alone. */
710 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
711 ir_mode *mode, ir_node **in, int ins,
715 ir_node *res, *known;
717 /* Allocate a new node on the obstack. The allocation copies the in
719 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
720 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
722 /* This loop checks whether the Phi has more than one predecessor.
723 If so, it is a real Phi node and we break the loop. Else the
724 Phi node merges the same definition on several paths and therefore
726 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
728 for (i = ins - 1; i >= 0; --i) {
731 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
733 /* Optimize self referencing Phis: We can't detect them yet properly, as
734 they still refer to the Phi0 they will replace. So replace right now. */
735 if (phi0 && in[i] == phi0)
738 if (in[i] == res || in[i] == known)
747 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The freshly allocated node is discarded again: remove it from the edge
   structures and free it from the obstack before reusing 'known'. */
750 edges_node_deleted(res, current_ir_graph);
751 obstack_free(current_ir_graph->obst, res);
753 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
754 order, an enclosing Phi know may get superfluous. */
755 res = optimize_in_place_2(known);
757 exchange(known, res);
762 /* A undefined value, e.g., in unreachable code. */
766 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
767 IRN_VRFY_IRG(res, irg);
768 /* Memory Phis in endless loops must be kept alive.
769 As we can't distinguish these easily we keep all of them alive. */
770 if (is_Phi(res) && mode == mode_M)
771 add_End_keepalive(get_irg_end(irg), res);
775 } /* new_rd_Phi_in */
/* Forward declarations: phi_merge and get_r_value_internal are mutually
   recursive with the fragile-op machinery below. */
777 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
779 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
782 * Construct a new frag_array for node n.
783 * Copy the content from the current graph_arr of the corresponding block:
784 * this is the current state.
785 * Set ProjM(n) as current memory state.
786 * Further the last entry in frag_arr of current block points to n. This
787 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): excerpt elides lines (is_Call branch head, set_optimize
   restore, return) -- verify against the full file. */
789 static inline ir_node **new_frag_arr(ir_node *n)
794 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
795 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
796 sizeof(ir_node *)*current_ir_graph->n_loc);
798 /* turn off optimization before allocating Proj nodes, as res isn't
800 opt = get_opt_optimize(); set_optimize(0);
801 /* Here we rely on the fact that all frag ops have Memory as first result! */
803 arr[0] = new_Proj(n, mode_M, pn_Call_M);
804 } else if (is_CopyB(n)) {
805 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
/* Fallback branch: all remaining fragile ops share the same memory Proj
   number, asserted below, so pn_Alloc_M is used representatively. */
807 assert((pn_Quot_M == pn_DivMod_M) &&
808 (pn_Quot_M == pn_Div_M) &&
809 (pn_Quot_M == pn_Mod_M) &&
810 (pn_Quot_M == pn_Load_M) &&
811 (pn_Quot_M == pn_Store_M) &&
812 (pn_Quot_M == pn_Alloc_M) &&
813 (pn_Quot_M == pn_Bound_M));
814 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the last slot of the block's graph_arr points to the newest frag op. */
818 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
823 * Returns the frag_arr from a node.
/* Dispatches on opcode to the per-op exception attribute holding frag_arr.
   NOTE(review): the case labels themselves are elided in this excerpt. */
825 static inline ir_node **get_frag_arr(ir_node *n)
827 switch (get_irn_opcode(n)) {
829 return n->attr.call.exc.frag_arr;
831 return n->attr.alloc.exc.frag_arr;
833 return n->attr.load.exc.frag_arr;
835 return n->attr.store.exc.frag_arr;
837 return n->attr.except.frag_arr;
/* Writes 'val' at 'pos' through the chain of frag arrays; the loop bound
   (1024) guards against endless recursion along the frag chain. */
841 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
846 for (i = 1024; i >= 0; --i)
851 if (frag_arr[pos] == NULL)
853 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
854 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
855 assert(arr != frag_arr && "Endless recursion detected");
860 assert(!"potential endless recursion in set_frag_value");
861 } /* set_frag_value */
/* Looks up the value at 'pos' as seen after fragile op 'cfOp'; creates a Phi
   (or Phi0 in an immature block) when a later set_value() shadows it.
   NOTE(review): excerpt elides lines (declarations, else-branch heads,
   returns) -- verify against the full file. */
863 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
864 int pos, ir_mode *mode)
869 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
871 frag_arr = get_frag_arr(cfOp);
874 if (block->attr.block.graph_arr[pos] != NULL) {
875 /* There was a set_value() after the cfOp and no get_value() before that
876 set_value(). We must build a Phi node now. */
877 if (block->attr.block.is_matured) {
878 int ins = get_irn_arity(block);
880 NEW_ARR_A(ir_node *, nin, ins);
881 res = phi_merge(block, pos, mode, nin, ins);
883 res = new_rd_Phi0(current_ir_graph, block, mode);
884 res->attr.phi.u.pos = pos;
885 res->attr.phi.next = block->attr.block.phis;
886 block->attr.block.phis = res;
889 /* It's a Phi, we can write this into all graph_arrs with NULL */
890 set_frag_value(block->attr.block.graph_arr, pos, res);
892 res = get_r_value_internal(block, pos, mode);
893 set_frag_value(block->attr.block.graph_arr, pos, res);
897 } /* get_r_frag_value_internal */
900 * Check whether a control flownode cf_pred represents an exception flow.
902 * @param cf_pred the control flow node
903 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
905 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
908 * Note: all projections from a raise are "exceptional control flow" so we handle it
909 * like a normal Jmp, because there is no "regular" one.
910 * That's why Raise is no "fragile_op"!
912 if (is_fragile_op(prev_cf_op)) {
913 if (is_Proj(cf_pred)) {
914 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
915 /* the regular control flow, NO exception */
918 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
921 /* Hmm, exception but not a Proj? */
922 panic("unexpected condition: fragile op without a proj");
925 } /* is_exception_flow */
928 * Computes the predecessors for the real phi node, and then
929 * allocates and returns this node. The routine called to allocate the
930 * node might optimize it away and return a real value.
931 * This function must be called with an in-array of proper size.
/* NOTE(review): this excerpt elides many lines (declarations, braces,
   several branch bodies, return) -- do not edit from this view alone. */
933 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
935 ir_node *prevBlock, *res, *phi0, *phi0_all;
938 /* If this block has no value at pos create a Phi0 and remember it
939 in graph_arr to break recursions.
940 Else we may not set graph_arr, as a later value may already be remembered there. */
942 if (block->attr.block.graph_arr[pos] == NULL) {
943 ir_graph *irg = current_ir_graph;
/* Start block: no predecessors to merge from; delegate to the frontend
   callback or fall back to an Unknown node. */
945 if (block == get_irg_start_block(irg)) {
946 /* Collapsing to Bad tarvals is no good idea.
947 So we call a user-supplied routine here that deals with this case as
948 appropriate for the given language. Sorrily the only help we can give
949 here is the position.
951 Even if all variables are defined before use, it can happen that
952 we get to the start block, if a Cond has been replaced by a tuple
953 (bad, jmp). In this case we call the function needlessly, eventually
954 generating a non-existent error.
955 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
958 if (default_initialize_local_variable != NULL) {
959 ir_node *rem = get_cur_block();
961 set_cur_block(block);
962 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
966 block->attr.block.graph_arr[pos] = new_Unknown(mode);
967 /* We don't need to care about exception ops in the start block.
968 There are none by definition. */
969 return block->attr.block.graph_arr[pos];
/* Non-start block: plant a Phi0 to break recursion before walking preds. */
971 phi0 = new_rd_Phi0(irg, block, mode);
972 block->attr.block.graph_arr[pos] = phi0;
973 if (get_opt_precise_exc_context()) {
974 /* Set graph_arr for fragile ops. Also here we should break recursion.
975 We could choose a cyclic path through an cfop. But the recursion would
976 break at some point. */
977 set_frag_value(block->attr.block.graph_arr, pos, phi0);
982 /* This loop goes to all predecessor blocks of the block the Phi node
983 is in and there finds the operands of the Phi node by calling
984 get_r_value_internal. */
985 for (i = 1; i <= ins; ++i) {
986 ir_node *cf_pred = block->in[i];
987 ir_node *prevCfOp = skip_Proj(cf_pred);
989 if (is_Bad(prevCfOp)) {
990 /* In case a Cond has been optimized we would get right to the start block
991 with an invalid definition. */
992 nin[i-1] = new_Bad();
995 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
997 if (!is_Bad(prevBlock)) {
998 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
999 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
1000 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
1002 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
1004 nin[i-1] = new_Bad();
1008 /* We want to pass the Phi0 node to the constructor: this finds additional
1009 optimization possibilities.
1010 The Phi0 node either is allocated in this function, or it comes from
1011 a former call to get_r_value_internal(). In this case we may not yet
1012 exchange phi0, as this is done in mature_immBlock(). */
1014 phi0_all = block->attr.block.graph_arr[pos];
1015 if (! is_Phi0(phi0_all) ||
1016 get_irn_arity(phi0_all) != 0 ||
1017 get_nodes_block(phi0_all) != block)
1023 /* After collecting all predecessors into the array nin a new Phi node
1024 with these predecessors is created. This constructor contains an
1025 optimization: If all predecessors of the Phi node are identical it
1026 returns the only operand instead of a new Phi node. */
1027 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1029 /* In case we allocated a Phi0 node at the beginning of this procedure,
1030 we need to exchange this Phi0 with the real Phi. */
1032 exchange(phi0, res);
1033 block->attr.block.graph_arr[pos] = res;
1034 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1035 only an optimization. */
1042 * This function returns the last definition of a value. In case
1043 * this value was last defined in a previous block, Phi nodes are
1044 * inserted. If the part of the firm graph containing the definition
1045 * is not yet constructed, a dummy Phi node is returned.
1047 * @param block the current block
1048 * @param pos the value number of the value searched
1049 * @param mode the mode of this value (needed for Phi construction)
/* NOTE(review): excerpt elides lines (declarations, an early return for
   case 2, closing braces) -- verify against the full file. */
1051 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1054 /* There are 4 cases to treat.
1056 1. The block is not mature and we visit it the first time. We can not
1057 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1058 predecessors is returned. This node is added to the linked list (block
1059 attribute "phis") of the containing block to be completed when this block is
1060 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1063 2. The value is already known in this block, graph_arr[pos] is set and we
1064 visit the block the first time. We can return the value without
1065 creating any new nodes.
1067 3. The block is mature and we visit it the first time. A Phi node needs
1068 to be created (phi_merge). If the Phi is not needed, as all its
1069 operands are the same value reaching the block through different
1070 paths, it's optimized away and the value itself is returned.
1072 4. The block is mature, and we visit it the second time. Now two
1073 subcases are possible:
1074 * The value was computed completely the last time we were here. This
1075 is the case if there is no loop. We can return the proper value.
1076 * The recursion that visited this node and set the flag did not
1077 return yet. We are computing a value in a loop and need to
1078 break the recursion. This case only happens if we visited
1079 the same block with phi_merge before, which inserted a Phi0.
1080 So we return the Phi0.
1083 /* case 4 -- already visited. */
1084 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1085 /* As phi_merge allocates a Phi0 this value is always defined. Here
1086 is the critical difference of the two algorithms. */
1087 assert(block->attr.block.graph_arr[pos]);
1088 return block->attr.block.graph_arr[pos];
1091 /* visited the first time */
1092 set_irn_visited(block, get_irg_visited(current_ir_graph));
1094 /* Get the local valid value */
1095 res = block->attr.block.graph_arr[pos];
1097 /* case 2 -- If the value is actually computed, return it. */
1101 if (block->attr.block.is_matured) { /* case 3 */
1103 /* The Phi has the same amount of ins as the corresponding block. */
1104 int ins = get_irn_arity(block);
1106 NEW_ARR_A(ir_node *, nin, ins);
1108 /* Phi merge collects the predecessors and then creates a node. */
1109 res = phi_merge(block, pos, mode, nin, ins);
1111 } else { /* case 1 */
1112 /* The block is not mature, we don't know how many in's are needed. A Phi
1113 with zero predecessors is created. Such a Phi node is called Phi0
1114 node. The Phi0 is then added to the list of Phi0 nodes in this block
1115 to be matured by mature_immBlock later.
1116 The Phi0 has to remember the pos of its internal value. If the real
1117 Phi is computed, pos is used to update the array with the local
1119 res = new_rd_Phi0(current_ir_graph, block, mode);
1120 res->attr.phi.u.pos = pos;
1121 res->attr.phi.next = block->attr.block.phis;
1122 block->attr.block.phis = res;
1125 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1127 /* The local valid value is available now. */
1128 block->attr.block.graph_arr[pos] = res;
1131 } /* get_r_value_internal */
1133 /* ************************************************************************** */
1136 * Finalize a Block node, when all control flows are known.
1137 * Acceptable parameters are only Block nodes.
/* Finalizes an immature block once all control-flow predecessors are known:
   fixes the backedge array, matures the chained Phi0 nodes via phi_merge,
   marks the block matured and optimizes it in place.
   NOTE(review): excerpt elides lines (declarations, loop body end, braces)
   -- verify against the full file. */
1139 void mature_immBlock(ir_node *block)
1145 assert(is_Block(block));
1146 if (!get_Block_matured(block)) {
1147 ir_graph *irg = current_ir_graph;
1149 ins = ARR_LEN(block->in) - 1;
1150 /* Fix block parameters */
1151 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1153 /* An array for building the Phi nodes. */
1154 NEW_ARR_A(ir_node *, nin, ins);
1156 /* Traverse a chain of Phi nodes attached to this block and mature
1158 for (n = block->attr.block.phis; n; n = next) {
1159 inc_irg_visited(irg);
1160 next = n->attr.phi.next;
1161 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1164 block->attr.block.is_matured = 1;
1166 /* Now, as the block is a finished Firm node, we can optimize it.
1167 Since other nodes have been allocated since the block was created
1168 we can not free the node on the obstack. Therefore we have to call
1169 optimize_in_place().
1170 Unfortunately the optimization does not change a lot, as all allocated
1171 nodes refer to the unoptimized node.
1172 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1173 block = optimize_in_place_2(block);
1174 IRN_VRFY_IRG(block, irg);
1176 } /* mature_immBlock */
1178 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1180 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1183 ir_node *new_d_Const(dbg_info *db, tarval *con)
1185 return new_bd_Const(db, con);
1188 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1190 return new_bd_Const_long(db, mode, value);
1191 } /* new_d_Const_long */
1193 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1195 return new_bd_Const_type(db, con, tp);
1196 } /* new_d_Const_type */
1199 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1202 assert(arg->op == op_Cond);
1203 arg->attr.cond.default_proj = max_proj;
1204 res = new_d_Proj(db, arg, mode_X, max_proj);
1206 } /* new_d_defaultProj */
1209 * Allocate a frag array for a node if the current graph state is phase_building.
1211 * @param irn the node for which the frag array should be allocated
1212 * @param op the opcode of the (original) node, if does not match opcode of irn,
1214 * @param frag_store the address of the frag store in irn attributes, if this
1215 * address contains a value != NULL, does nothing
1217 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1219 if (get_opt_precise_exc_context()) {
1220 if ((current_ir_graph->phase_state == phase_building) &&
1221 (get_irn_op(irn) == op) && /* Could be optimized away. */
1222 !*frag_store) /* Could be a cse where the arr is already set. */ {
1223 *frag_store = new_frag_arr(irn);
1226 } /* firm_alloc_frag_arr */
1228 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1229 /* GL: objptr was called frame before. Frame was a bad choice for the name
1230 as the operand could as well be a pointer to a dynamic object. */
1232 return new_bd_Sel(db, current_ir_graph->current_block,
1233 store, objptr, 0, NULL, ent);
1234 } /* new_d_simpleSel */
1236 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1238 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1240 } /* new_d_SymConst_type */
1242 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1244 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1245 value, kind, firm_unknown_type);
1246 } /* new_d_SymConst */
1248 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1250 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1253 ir_node *new_d_EndReg(dbg_info *db)
1255 return new_bd_EndReg(db, current_ir_graph->current_block);
1256 } /* new_d_EndReg */
1258 ir_node *new_d_EndExcept(dbg_info *db)
1260 return new_bd_EndExcept(db, current_ir_graph->current_block);
1261 } /* new_d_EndExcept */
1264 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1265 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1266 ident *clobber[], ident *text)
1268 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1271 /* ********************************************************************* */
1272 /* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of the other interfaces, except new_Block.)        */
1274 /* ********************************************************************* */
1276 /* Block construction */
1277 /* immature Block without predecessors */
/* Creates a new immature Block node without predecessors.  Predecessors are
   added with add_immBlock_pred(); the block must later be finished with
   mature_immBlock(). */
ir_node *new_d_immBlock(dbg_info *db)
{
	ir_node *res;

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);

	/* macroblock head: in[0] of a fresh immBlock refers to the block itself
	   (NOTE(review): this slot assignment is hidden in the condensed view;
	   new_d_immPartBlock below copies in[0] from the predecessor's block) */
	res->in[0] = res;

	/* the block starts out immature, alive, and as head of its own macroblock */
	res->attr.block.is_matured = 0;
	res->attr.block.is_dead = 0;
	res->attr.block.is_mb_head = 1;
	res->attr.block.irg.irg = current_ir_graph;
	res->attr.block.backedge = NULL;
	res->attr.block.in_cg = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk = NULL;
	res->attr.block.region = NULL;
	res->attr.block.mb_depth = 0;
	res->attr.block.entity = NULL;

	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
	                                      current_ir_graph->n_loc);
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

	/* Immature block may not be optimized! */
	IRN_VRFY_IRG(res, current_ir_graph);

	return res;
}  /* new_d_immBlock */
1314 ir_node *new_immBlock(void)
1316 return new_d_immBlock(NULL);
1317 } /* new_immBlock */
/* immature PartBlock with its predecessors */
ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
{
	ir_node *res = new_d_immBlock(db);
	ir_node *blk = get_nodes_block(pred_jmp);

	/* a PartBlock joins the macroblock of its predecessor's block:
	   inherit the macroblock header stored at in[0] */
	res->in[0] = blk->in[0];
	assert(res->in[0] != NULL);
	add_immBlock_pred(res, pred_jmp);

	/* the PartBlock is not a macroblock head and sits one level deeper */
	res->attr.block.is_mb_head = 0;
	res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;

	return res;
}  /* new_d_immPartBlock */
1335 ir_node *new_immPartBlock(ir_node *pred_jmp)
1337 return new_d_immPartBlock(NULL, pred_jmp);
1338 } /* new_immPartBlock */
/* add an edge to a jmp/control flow node */
void add_immBlock_pred(ir_node *block, ir_node *jmp)
{
	/* in[0] holds the macroblock header, so the new predecessor gets
	   position n = current number of control flow predecessors */
	int n = ARR_LEN(block->in) - 1;

	assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
	assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
	assert(is_ir_node(jmp));

	ARR_APP1(ir_node *, block->in, jmp);

	/* notify debug/profiling hooks about the new edge */
	hook_set_irn_n(block, n, jmp, NULL);
}  /* add_immBlock_pred */
1354 /* changing the current block */
1355 void set_cur_block(ir_node *target)
1357 current_ir_graph->current_block = target;
1358 } /* set_cur_block */
1360 /* ************************ */
1361 /* parameter administration */
1363 /* get a value from the parameter array from the current block by its index */
1364 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
1366 ir_graph *irg = current_ir_graph;
1367 assert(get_irg_phase_state(irg) == phase_building);
1368 inc_irg_visited(irg);
1373 return get_r_value_internal(irg->current_block, pos + 1, mode);
1376 /* get a value from the parameter array from the current block by its index */
1377 ir_node *get_value(int pos, ir_mode *mode)
1379 return get_d_value(NULL, pos, mode);
1382 /* set a value at position pos in the parameter array from the current block */
1383 void set_value(int pos, ir_node *value)
1385 ir_graph *irg = current_ir_graph;
1386 assert(get_irg_phase_state(irg) == phase_building);
1388 assert(pos+1 < irg->n_loc);
1389 assert(is_ir_node(value));
1390 irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* Find the value number for a node in the current block.
   Returns the index of value in the current block's value array,
   or -1 if it is not present. */
int find_value(ir_node *value)
{
	int i;
	ir_node *bl = current_ir_graph->current_block;

	/* scan backwards; index 0 holds the memory state, so stop at 1 */
	for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
		if (bl->attr.block.graph_arr[i] == value)
			return i;

	return -1;
}  /* find_value */
1405 /* get the current store */
1406 ir_node *get_store(void)
1408 ir_graph *irg = current_ir_graph;
1410 assert(get_irg_phase_state(irg) == phase_building);
1411 /* GL: one could call get_value instead */
1412 inc_irg_visited(irg);
1413 return get_r_value_internal(irg->current_block, 0, mode_M);
/* set the current store: handles automatic Sync construction for Load nodes */
void set_store(ir_node *store)
{
	ir_node *load, *pload, *pred, *in[2];

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

			if (is_Sync(pred)) {
				/* a Load after a Sync: move it up */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);
				/* the Sync now subsumes the new Load, so it becomes the store
				   (NOTE(review): reconstructed; hidden in the condensed view) */
				store = pred;
			} else {
				pload = skip_Proj(pred);
				if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
					/* a Load after a Load: create a new Sync */
					set_Load_mem(load, get_Load_mem(pload));

					/* combine both memory chains in a fresh Sync
					   (NOTE(review): reconstructed; hidden in the condensed view) */
					in[0] = store;
					in[1] = pred;
					store = new_Sync(2, in);
				}
			}
		}
	}
	/* the memory state lives in slot 0 of the current block's value array */
	current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}  /* set_store */
1455 void keep_alive(ir_node *ka)
1457 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1460 /* --- Useful access routines --- */
1461 /* Returns the current block of the current graph. To set the current
1462 block use set_cur_block. */
1463 ir_node *get_cur_block(void)
1465 return get_irg_current_block(current_ir_graph);
1466 } /* get_cur_block */
1468 /* Returns the frame type of the current graph */
1469 ir_type *get_cur_frame_type(void)
1471 return get_irg_frame_type(current_ir_graph);
1472 } /* get_cur_frame_type */
1475 /* ********************************************************************* */
1478 /* call once for each run of the library */
1479 void firm_init_cons(uninitialized_local_variable_func_t *func)
1481 default_initialize_local_variable = func;
1482 } /* firm_init_cons */
1484 void irp_finalize_cons(void)
1487 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1488 irg_finalize_cons(get_irp_irg(i));
1490 irp->phase_state = phase_high;
1491 } /* irp_finalize_cons */
1493 ir_node *new_Start(void)
1495 return new_d_Start(NULL);
1497 ir_node *new_End(void)
1499 return new_d_End(NULL);
1501 ir_node *new_Const(tarval *con)
1503 return new_d_Const(NULL, con);
1506 ir_node *new_Const_long(ir_mode *mode, long value)
1508 return new_d_Const_long(NULL, mode, value);
1511 ir_node *new_Const_type(tarval *con, ir_type *tp)
1513 return new_d_Const_type(NULL, con, tp);
1516 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1518 return new_d_SymConst_type(NULL, mode, value, kind, type);
1520 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1522 return new_d_SymConst(NULL, mode, value, kind);
1524 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1526 return new_d_simpleSel(NULL, store, objptr, ent);
1528 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1530 return new_d_Phi(NULL, arity, in, mode);
1532 ir_node *new_Sync(int arity, ir_node *in[])
1534 return new_d_Sync(NULL, arity, in);
1536 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1538 return new_d_defaultProj(NULL, arg, max_proj);
1540 ir_node *new_Bad(void)
1542 return get_irg_bad(current_ir_graph);
1544 ir_node *new_EndReg(void)
1546 return new_d_EndReg(NULL);
1548 ir_node *new_EndExcept(void)
1550 return new_d_EndExcept(NULL);
1552 ir_node *new_NoMem(void)
1554 return get_irg_no_mem(current_ir_graph);
1556 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1557 int n_outs, ir_asm_constraint *outputs,
1558 int n_clobber, ident *clobber[], ident *text)
1560 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1563 /* create a new anchor node */
1564 ir_node *new_Anchor(ir_graph *irg)
1566 ir_node *in[anchor_last];
1567 memset(in, 0, sizeof(in));
1568 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);