2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
46 /* when we need verifying */
48 # define IRN_VRFY_IRG(res, irg)
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * NOTE(review): this chunk is a damaged paste — the original file's line
 * numbers are fused into the text and several macro lines are missing
 * (the embedded numbering jumps).  Code is preserved verbatim; comments
 * are only added between macro definitions so the `\` continuations of
 * the visible lines are not disturbed.
 *
 * The NEW_BD_* macros generate "bd" constructors: they build a node of
 * opcode op_<instr> in the given block of current_ir_graph, run local
 * optimization (optimize_node) on it and verify it (IRN_VRFY_IRG).
 */
58 /* creates a bd constructor for a binop */
59 #define NEW_BD_BINOP(instr)                                     \
61 new_bd_##instr(dbg_info *db, ir_node *block,                    \
62        ir_node *op1, ir_node *op2, ir_mode *mode)               \
66   ir_graph *irg = current_ir_graph;                             \
69   res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);   \
70   res = optimize_node(res);                                     \
71   IRN_VRFY_IRG(res, irg);                                       \
/* Same pattern with a single operand passed by address (&op). */
75 /* creates a bd constructor for a unop */
76 #define NEW_BD_UNOP(instr)                                      \
78 new_bd_##instr(dbg_info *db, ir_node *block,                    \
79                ir_node *op, ir_mode *mode)                      \
82   ir_graph *irg = current_ir_graph;                             \
83   res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);  \
84   res = optimize_node(res);                                     \
85   IRN_VRFY_IRG(res, irg);                                       \
/* Div-like ops produce a mode_T tuple, take a memory operand and carry
   divmod attributes (pin state, result mode, remainder flag). */
89 /* creates a bd constructor for a divop */
90 #define NEW_BD_DIVOP(instr)                                     \
92 new_bd_##instr(dbg_info *db, ir_node *block,                    \
93             ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
97   ir_graph *irg = current_ir_graph;                             \
101   res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in);  \
102   res->attr.divmod.exc.pin_state = state;                       \
103   res->attr.divmod.resmode = mode;                              \
104   res->attr.divmod.no_remainder = 0;                            \
105   res = optimize_node(res);                                     \
106   IRN_VRFY_IRG(res, irg);                                       \
/*
 * NOTE(review): damaged paste — fused line numbers, missing lines; code
 * kept verbatim, comments added only at macro boundaries.
 *
 * The NEW_RD_* macros generate "rd" constructors: same as the bd
 * variants, but they take an explicit ir_graph and temporarily switch
 * current_ir_graph to it around the call to the bd constructor.
 */
110 /* creates a rd constructor for a binop */
111 #define NEW_RD_BINOP(instr)                                     \
113 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
114        ir_node *op1, ir_node *op2, ir_mode *mode)               \
117   ir_graph *rem = current_ir_graph;                             \
118   current_ir_graph = irg;                                       \
119   res = new_bd_##instr(db, block, op1, op2, mode);              \
120   current_ir_graph = rem;                                       \
124 /* creates a rd constructor for a unop */
125 #define NEW_RD_UNOP(instr)                                      \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
128                ir_node *op, ir_mode *mode)                      \
131   ir_graph *rem = current_ir_graph;                             \
132   current_ir_graph = irg;                                       \
133   res = new_bd_##instr(db, block, op, mode);                    \
134   current_ir_graph = rem;                                       \
138 /* creates a rd constructor for a divop */
139 #define NEW_RD_DIVOP(instr)                                     \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
142             ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
145   ir_graph *rem = current_ir_graph;                             \
146   current_ir_graph = irg;                                       \
147   res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
148   current_ir_graph = rem;                                       \
/*
 * NOTE(review): damaged paste — fused line numbers, missing lines; code
 * kept verbatim.
 *
 * The NEW_D_* macros generate "d" constructors: convenience wrappers
 * that build into current_ir_graph->current_block (the block under
 * construction) by delegating to the bd constructors.
 */
152 /* creates a d constructor for a binop */
153 #define NEW_D_BINOP(instr)                                                  \
155 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {    \
156   return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode);\
159 /* creates a d constructor for a unop */
160 #define NEW_D_UNOP(instr)                                                   \
162 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {                   \
163   return new_bd_##instr(db, current_ir_graph->current_block, op, mode);     \
166 #include "gen_ir_cons.c.inl"
/* NOTE(review): damaged paste — fused line numbers; the return-type
   line, the `res` declaration and the return/closing-brace lines of
   both functions are missing from this view.  Code kept verbatim. */
/* Build a Start node (mode_T tuple, no predecessors) in `block` of
   current_ir_graph and verify it. */
169 new_bd_Start(dbg_info *db, ir_node *block) {
171   ir_graph *irg = current_ir_graph;
173   res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175   IRN_VRFY_IRG(res, irg);
/* Build an End node (mode_X, variable arity -1: keepalives are added
   later) in `block` of current_ir_graph and verify it. */
180 new_bd_End(dbg_info *db, ir_node *block) {
182   ir_graph *irg = current_ir_graph;
184   res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186   IRN_VRFY_IRG(res, irg);
/* NOTE(review): damaged paste — fused line numbers; declarations of
   `res`, `i`, `has_unknown` and parts of the Unknown-scan loop are
   missing from this view.  Code kept verbatim. */
191 * Creates a Phi node with all predecessors. Calling this constructor
192 is only allowed if the corresponding block is mature.
195 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
197   ir_graph *irg = current_ir_graph;
201   /* Don't assert that block matured: the use of this constructor is strongly
203   if (get_Block_matured(block))
204     assert(get_irn_arity(block) == arity);
206   res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
   /* Per-input backedge flags, allocated on the graph's obstack. */
208   res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
   /* Scan inputs for Unknown operands; presumably sets has_unknown
      (the flag's assignment is among the missing lines — TODO confirm). */
210   for (i = arity - 1; i >= 0; --i)
211     if (is_Unknown(in[i])) {
   /* Phis with Unknown inputs are not optimized here. */
216   if (!has_unknown) res = optimize_node(res);
217   IRN_VRFY_IRG(res, irg);
219   /* Memory Phis in endless loops must be kept alive.
220      As we can't distinguish these easily we keep all of them alive. */
221   if (is_Phi(res) && mode == mode_M)
222     add_End_keepalive(get_irg_end(irg), res);
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res` declarations and closing braces are missing from this view.
   Code kept verbatim. */
/* Build a typed Const in the start block of current_ir_graph; the
   node's mode is taken from the tarval. */
227 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
229   ir_graph *irg = current_ir_graph;
231   res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232   res->attr.con.tv = con;
233   set_Const_type(res, tp);  /* Call method because of complex assertion. */
234   res = optimize_node (res);
235   assert(get_Const_type(res) == tp);
236   IRN_VRFY_IRG(res, irg);
239 }  /* new_bd_Const_type */
/* Const with the unknown type — delegates to the rd variant. */
242 new_bd_Const(dbg_info *db, tarval *con) {
243   ir_graph *irg = current_ir_graph;
245   return new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Const built from a plain C `long`, converted to a tarval first. */
249 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
250   ir_graph *irg = current_ir_graph;
252   return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 }  /* new_bd_Const_long */
/* NOTE(review): damaged paste — fused line numbers; the `long max_proj`
   parameter line, `res` declaration and return are missing from this
   view.  Code kept verbatim. */
/* Build the default Proj of a Cond: records max_proj as the Cond's
   default_proj and creates the corresponding mode_X Proj. */
256 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
260   assert(arg->op == op_Cond);
261   arg->attr.cond.default_proj = max_proj;
262   res = new_rd_Proj(db, block, arg, mode_X, max_proj);
264 }  /* new_bd_defaultProj */
/* NOTE(review): damaged paste — fused line numbers; declarations of
   `r_in`/`r_arity`, the lines storing store/objptr into r_in[0..1],
   and the return are missing from this view.  Code kept verbatim. */
/* Build a Sel node selecting entity `ent` from `objptr` with `arity`
   index operands.  Result mode is mode_P_code for method entities,
   mode_P_data otherwise. */
267 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
268            int arity, ir_node **in, ir_entity *ent) {
272   ir_graph *irg = current_ir_graph;
273   ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
275   assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
   /* r_in holds store, objptr, then the index operands. */
278   NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
281   memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
283    * Sel's can select functions which should be of mode mode_P_code.
285   res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
286   res->attr.sel.entity = ent;
287   res = optimize_node(res);
288   IRN_VRFY_IRG(res, irg);
/* NOTE(review): damaged paste — fused line numbers; return-type line
   and return statement are missing from this view.  Code kept
   verbatim. */
/* Build a SymConst node carrying a symbolic constant (`value` of kind
   `symkind`, with attached type `tp`) in the given block. */
293 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
294                      symconst_symbol value,symconst_kind symkind, ir_type *tp) {
295   ir_graph *irg = current_ir_graph;
296   ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
298   res->attr.symc.kind = symkind;
299   res->attr.symc.sym  = value;
300   res->attr.symc.tp   = tp;
302   res = optimize_node(res);
303   IRN_VRFY_IRG(res, irg);
305 }  /* new_bd_SymConst_type */
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res` declarations and some returns/braces are missing from this
   view.  Code kept verbatim. */
/* Build an empty Sync node (mode_M, variable arity -1); predecessors
   are added later via add_Sync_pred. */
308 new_bd_Sync(dbg_info *db, ir_node *block) {
310   ir_graph *irg = current_ir_graph;
312   res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
313   /* no need to call optimize node here, Sync are always created with no predecessors */
314   IRN_VRFY_IRG(res, irg);
/* Build an EndReg node and register it as the graph's regular end. */
320 new_bd_EndReg(dbg_info *db, ir_node *block) {
322   ir_graph *irg = current_ir_graph;
324   res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
325   set_irg_end_reg(irg, res);
326   IRN_VRFY_IRG(res, irg);
328 }  /* new_bd_EndReg */
/* Build an EndExcept node and register it as the graph's exceptional
   end. */
331 new_bd_EndExcept(dbg_info *db, ir_node *block) {
333   ir_graph *irg = current_ir_graph;
335   res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
336   set_irg_end_except(irg, res);
337   IRN_VRFY_IRG (res, irg);
339 }  /* new_bd_EndExcept */
/* NOTE(review): damaged paste — fused line numbers; return-type line,
   `res` declaration and return are missing from this view.  Code kept
   verbatim. */
/* Build a pinned ASM node.  The constraint and clobber arrays are
   copied onto the graph's obstack so the caller's arrays need not
   outlive the node. */
342 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
343            int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
345   ir_graph *irg = current_ir_graph;
347   res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
348   res->attr.assem.pin_state = op_pin_state_pinned;
349   res->attr.assem.inputs    = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
350   res->attr.assem.outputs   = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
351   res->attr.assem.clobber   = NEW_ARR_D(ident *, irg->obst, n_clobber);
352   res->attr.assem.asm_text  = asm_text;
354   memcpy(res->attr.assem.inputs,  inputs,  sizeof(inputs[0]) * arity);
355   memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
356   memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
358   res = optimize_node(res);
359   IRN_VRFY_IRG(res, irg);
363 /* --------------------------------------------- */
364 /* private interfaces, for professional use only */
365 /* --------------------------------------------- */
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res` declarations and returns/braces are missing from this view.
   Code kept verbatim. */
/* rd variant of Start: switch current_ir_graph to irg around the bd
   constructor, then restore it. */
368 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
369   ir_graph *rem = current_ir_graph;
372   current_ir_graph = irg;
373   res = new_bd_Start(db, block);
374   current_ir_graph = rem;
/* rd variant of End — same current_ir_graph save/restore pattern. */
380 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
382   ir_graph *rem = current_ir_graph;
384   current_ir_graph = irg;
385   res = new_bd_End(db, block);
386   current_ir_graph = rem;
391 /* Creates a Phi node with all predecessors. Calling this constructor
392    is only allowed if the corresponding block is mature. */
394 new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
396   ir_graph *rem = current_ir_graph;
   /* The graph is derived from the block here, not passed in. */
398   current_ir_graph = get_Block_irg(block);
399   res = new_bd_Phi(db, block,arity, in, mode);
400   current_ir_graph = rem;
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res` declarations and some returns are missing from this view.
   Code kept verbatim. */
/* rd variant of the typed Const constructor. */
406 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
408   ir_graph *rem = current_ir_graph;
410   current_ir_graph = irg;
411   res = new_bd_Const_type(db, con, tp);
412   current_ir_graph = rem;
415 }  /* new_rd_Const_type */
/* rd Const with unknown type.  The commented-out delegation below is a
   leftover of an alternative (non-USE_ORIGINAL) code path. */
418 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
420 //#ifdef USE_ORIGINAL
421   ir_graph *rem = current_ir_graph;
423   current_ir_graph = irg;
424   res = new_bd_Const_type(db, con, firm_unknown_type);
425   current_ir_graph = rem;
427 //  res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* rd Const from a plain C `long`. */
434 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
435   return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
436 }  /* new_rd_Const_long */
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res` declarations and returns are missing from this view.  Code
   kept verbatim. */
/* rd variant of defaultProj; graph taken from the block. */
439 new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj) {
441   ir_graph *rem = current_ir_graph;
443   current_ir_graph = get_Block_irg(block);
444   res = new_bd_defaultProj(db, block, arg, max_proj);
445   current_ir_graph = rem;
448 }  /* new_rd_defaultProj */
/* Sel with no index operands (simple field selection). */
451 new_rd_simpleSel(dbg_info *db, ir_node *block,
452                  ir_node *store, ir_node *objptr, ir_entity *ent) {
454   ir_graph *rem = current_ir_graph;
456   current_ir_graph = get_Block_irg(block);
457   res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
458   current_ir_graph = rem;
461 }  /* new_rd_simpleSel */
/* NOTE(review): damaged paste — fused line numbers; return-type lines
   and the `symconst_symbol sym;` declarations of the helper wrappers
   are missing from this view.  Code kept verbatim. */
/* rd SymConst constructor; SymConsts are placed in the start block. */
464 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
465                      symconst_symbol value, symconst_kind symkind, ir_type *tp) {
467   ir_graph *rem = current_ir_graph;
468   ir_node *block = get_irg_start_block(irg);
470   current_ir_graph = irg;
471   res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
472   current_ir_graph = rem;
475 }  /* new_rd_SymConst_type */
/* SymConst with unknown attached type. */
478 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
479                 symconst_symbol value, symconst_kind symkind) {
480   return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
481 }  /* new_rd_SymConst */
/* The following wrappers pack the symbol into a symconst_symbol union
   and delegate with the matching symconst_kind. */
483 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
485   sym.entity_p = symbol;
486   return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
487 }  /* new_rd_SymConst_addr_ent */
489 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
491   sym.entity_p = symbol;
492   return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
493 }  /* new_rd_SymConst_ofs_ent */
495 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
497   sym.ident_p = symbol;
498   return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_name, tp);
499 }  /* new_rd_SymConst_addr_name */
501 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
504   return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
505 }  /* new_rd_SymConst_type_tag */
507 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
510   return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
511 }  /* new_rd_SymConst_size */
513 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
516   return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
517 }  /* new_rd_SymConst_align */
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res`/`i` declarations and returns are missing from this view.  Code
   kept verbatim. */
/* Build a Sync and then attach the given predecessors one by one. */
520 new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[]) {
522   ir_graph *rem = current_ir_graph;
525   current_ir_graph = get_Block_irg(block);
526   res = new_bd_Sync(db, block);
527   current_ir_graph = rem;
529   for (i = 0; i < arity; ++i)
530     add_Sync_pred(res, in[i]);
/* rd EndReg — note: built directly with new_ir_node here, not via the
   bd constructor. */
536 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
539   res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
540   set_irg_end_reg(irg, res);
541   IRN_VRFY_IRG(res, irg);
543 }  /* new_rd_EndReg */
/* rd EndExcept — likewise built directly. */
546 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
549   res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
550   set_irg_end_except(irg, res);
551   IRN_VRFY_IRG (res, irg);
553 }  /* new_rd_EndExcept */
/* NOTE(review): damaged paste — fused line numbers; `res` declaration
   and return are missing from this view.  Code kept verbatim. */
/* rd variant of ASM; graph taken from the block via get_Block_irg. */
555 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
556                     int arity, ir_node *in[], ir_asm_constraint *inputs,
557                     int n_outs, ir_asm_constraint *outputs,
558                     int n_clobber, ident *clobber[], ident *asm_text) {
560   ir_graph *rem = current_ir_graph;
562   current_ir_graph = get_Block_irg(block);
563   res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
564   current_ir_graph = rem;
/* NOTE(review): damaged paste — fused line numbers; the closing brace
   of every wrapper below is missing from this view.  Code kept
   verbatim. */
/* The new_r_* family: thin wrappers over the new_rd_* constructors
   with a NULL dbg_info.  Bad and NoMem return the graph's singleton
   nodes instead of constructing anything. */
569 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
570   return new_rd_Start(NULL, irg, block);
572 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
573   return new_rd_End(NULL, irg, block);
575 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
576   return new_rd_Const(NULL, irg, con);
578 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
579   return new_rd_Const_long(NULL, irg, mode, value);
581 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
582   return new_rd_Const_type(NULL, irg, con, tp);
584 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode,
585                         symconst_symbol value, symconst_kind symkind) {
586   return new_rd_SymConst(NULL, irg, mode, value, symkind);
588 ir_node *new_r_simpleSel(ir_node *block, ir_node *store,
589                          ir_node *objptr, ir_entity *ent) {
590   return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
592 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode) {
593   return new_rd_Phi(NULL, block, arity, in, mode);
595 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[]) {
596   return new_rd_Sync(NULL, block, arity, in);
598 ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj) {
599   return new_rd_defaultProj(NULL, block, arg, max_proj);
601 ir_node *new_r_Bad(ir_graph *irg) {
602   return get_irg_bad(irg);
604 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
605   return new_rd_EndReg(NULL, irg, block);
607 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
608   return new_rd_EndExcept(NULL, irg, block);
610 ir_node *new_r_NoMem(ir_graph *irg) {
611   return get_irg_no_mem(irg);
613 ir_node *new_r_ASM(ir_node *block,
614                    int arity, ir_node *in[], ir_asm_constraint *inputs,
615                    int n_outs, ir_asm_constraint *outputs,
616                    int n_clobber, ident *clobber[], ident *asm_text) {
617   return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
620 /** ********************/
621 /** public interfaces */
622 /** construction tools */
/* NOTE(review): damaged paste — fused line numbers; return-type lines,
   `res` declarations and returns/braces are missing from this view.
   Code kept verbatim. */
/* d variant of Start: builds directly into the current block of
   current_ir_graph (unlike most d constructors, not via a bd
   constructor). */
625 new_d_Start(dbg_info *db) {
628   res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
629                     op_Start, mode_T, 0, NULL);
631   res = optimize_node(res);
632   IRN_VRFY_IRG(res, current_ir_graph);
/* d variant of End — same direct construction. */
637 new_d_End(dbg_info *db) {
639   res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
640                     op_End, mode_X, -1, NULL);
641   res = optimize_node(res);
642   IRN_VRFY_IRG(res, current_ir_graph);
647 /* ***********************************************************************/
648 /* Methods necessary for automatic Phi node creation */
650 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
651 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
652 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
653 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
655 Call Graph: ( A ---> B == A "calls" B)
657 get_value mature_immBlock
665 get_r_value_internal |
669 new_rd_Phi0 new_rd_Phi_in
671 * *************************************************************************** */
/* NOTE(review): damaged paste — fused line numbers; `res` declaration
   and return are missing from this view.  Code kept verbatim.
   A Phi0 is the zero-predecessor placeholder used while a block is
   immature; it is not run through optimize_node. */
673 /** Creates a Phi node with 0 predecessors. */
674 static inline ir_node *
675 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
678   res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
679   IRN_VRFY_IRG(res, irg);
/* NOTE(review): damaged paste — fused line numbers; `i` declaration,
   several branch bodies (the `continue`/`break` statements of the scan
   loop, the known!=res handling, the Bad fallback) and the return are
   missing from this view.  Code kept verbatim; only comments edited. */
685  * Internal constructor of a Phi node by a phi_merge operation.
687  * @param irg    the graph on which the Phi will be constructed
688  * @param block  the block in which the Phi will be constructed
689  * @param mode   the mode of the Phi node
690  * @param in     the input array of the phi node
691  * @param ins    number of elements in the input array
692  * @param phi0   if non-NULL: the Phi0 node in the same block that represents
693  *               the value for which the new Phi is constructed
695 static inline ir_node *
696 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
697               ir_node **in, int ins, ir_node *phi0) {
699   ir_node *res, *known;
701   /* Allocate a new node on the obstack. The allocation copies the in
703   res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
704   res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
706   /* This loop checks whether the Phi has more than one predecessor.
707      If so, it is a real Phi node and we break the loop. Else the
708      Phi node merges the same definition on several paths and therefore
710      Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
712   for (i = ins - 1; i >= 0; --i) {
715     in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */
717     /* Optimize self referencing Phis: We can't detect them yet properly, as
718        they still refer to the Phi0 they will replace. So replace right now. */
719     if (phi0 && in[i] == phi0)
722     if (in[i] == res || in[i] == known)
731   /* i < 0: there is at most one predecessor, we don't need a phi node. */
   /* The freshly allocated Phi is discarded again (it is the top of the
      obstack, so obstack_free reclaims it). */
734     edges_node_deleted(res, current_ir_graph);
735     obstack_free(current_ir_graph->obst, res);
737     /* If pred is a phi node we want to optimize it: If loops are matured in a bad
738        order, an enclosing Phi node may get superfluous. */
739       res = optimize_in_place_2(known);
741         exchange(known, res);
746       /* An undefined value, e.g., in unreachable code. */
750   res = optimize_node(res);  /* This is necessary to add the node to the hash table for cse. */
751   IRN_VRFY_IRG(res, irg);
752   /* Memory Phis in endless loops must be kept alive.
753      As we can't distinguish these easily we keep all of them alive. */
754   if (is_Phi(res) && mode == mode_M)
755     add_End_keepalive(get_irg_end(irg), res);
759 }  /* new_rd_Phi_in */
762 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
765 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* NOTE(review): damaged paste — fused line numbers; the `arr`/`opt`
   declarations, the leading `if (is_Call(n))`, the set_optimize(opt)
   restore and the return are missing from this view.  Code kept
   verbatim. */
768  * Construct a new frag_array for node n.
769  * Copy the content from the current graph_arr of the corresponding block:
770  * this is the current state.
771  * Set ProjM(n) as current memory state.
772  * Further the last entry in frag_arr of current block points to n. This
773  * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
775 static inline ir_node **new_frag_arr(ir_node *n) {
779   arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
780   memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
781          sizeof(ir_node *)*current_ir_graph->n_loc);
783   /* turn off optimization before allocating Proj nodes, as res isn't
785   opt = get_opt_optimize(); set_optimize(0);
786   /* Here we rely on the fact that all frag ops have Memory as first result! */
788     arr[0] = new_Proj(n, mode_M, pn_Call_M);
789   } else if (is_CopyB(n)) {
790     arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
   /* All remaining fragile ops share the same memory Proj number; the
      assert documents that invariant before using pn_Alloc_M for all. */
792     assert((pn_Quot_M == pn_DivMod_M) &&
793            (pn_Quot_M == pn_Div_M)    &&
794            (pn_Quot_M == pn_Mod_M)    &&
795            (pn_Quot_M == pn_Load_M)   &&
796            (pn_Quot_M == pn_Store_M)  &&
797            (pn_Quot_M == pn_Alloc_M)  &&
798            (pn_Quot_M == pn_Bound_M));
799     arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
803   current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* NOTE(review): damaged paste — fused line numbers; the `case iro_*:`
   labels, default branch and closing braces of the switch are missing
   from this view.  Code kept verbatim.
   Dispatches on the node's opcode because the frag_arr lives in
   opcode-specific attribute structs. */
808  * Returns the frag_arr from a node.
810 static inline ir_node **get_frag_arr(ir_node *n) {
811   switch (get_irn_opcode(n)) {
813     return n->attr.call.exc.frag_arr;
815     return n->attr.alloc.exc.frag_arr;
817     return n->attr.load.exc.frag_arr;
819     return n->attr.store.exc.frag_arr;
821     return n->attr.except.frag_arr;
/* NOTE(review): damaged paste — fused line numbers; the return type,
   the loop body (the writes/early returns) and the advance to the next
   frag_arr in the chain are missing from this view.  Code kept
   verbatim. */
/* Walk the chain of frag arrays (linked through slot n_loc-1) and set
   value `val` at `pos` wherever it is still unset.  The loop bound of
   1024 iterations is a hard cap guarding against a cyclic chain. */
826 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
830   for (i = 1024; i >= 0; --i)
835     if (frag_arr[pos] == NULL)
837     if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
838       ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
839       assert(arr != frag_arr && "Endless recursion detected");
844   assert(!"potential endless recursion in set_frag_value");
845 }  /* set_frag_value */
/* NOTE(review): damaged paste — fused line numbers; the return type,
   `res`/`frag_arr`/`nin` declarations, some else branches and the
   return are missing from this view.  Code kept verbatim. */
/* Look up the value at `pos` as seen after fragile op `cfOp`: prefer
   the value recorded in the block's graph_arr (building a Phi/Phi0 if
   the block demands it), otherwise fall back to the normal
   get_r_value_internal lookup; in both cases propagate the result into
   the frag arrays. */
848 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
852   assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
854   frag_arr = get_frag_arr(cfOp);
857   if (block->attr.block.graph_arr[pos] != NULL) {
858     /* There was a set_value() after the cfOp and no get_value() before that
859        set_value(). We must build a Phi node now. */
860     if (block->attr.block.is_matured) {
861       int ins = get_irn_arity(block);
863       NEW_ARR_A(ir_node *, nin, ins);
864       res = phi_merge(block, pos, mode, nin, ins);
   /* Immature block: queue a Phi0 on the block's phi list, to be
      completed by mature_immBlock. */
866       res = new_rd_Phi0(current_ir_graph, block, mode);
867       res->attr.phi.u.pos = pos;
868       res->attr.phi.next = block->attr.block.phis;
869       block->attr.block.phis = res;
872     /* It's a Phi, we can write this into all graph_arrs with NULL */
873     set_frag_value(block->attr.block.graph_arr, pos, res);
875     res = get_r_value_internal(block, pos, mode);
876     set_frag_value(block->attr.block.graph_arr, pos, res);
880 }  /* get_r_frag_value_internal */
/* NOTE(review): damaged paste — fused line numbers; the `return 0;` /
   `return 1;` statements and closing braces are missing from this
   view.  Code kept verbatim; only comments edited. */
883  * Check whether a control flow node cf_pred represents an exception flow.
885  * @param cf_pred     the control flow node
886  * @param prev_cf_op  if cf_pred is a Proj, the predecessor node, else equal to cf_pred
888 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
890  * Note: all projections from a Raise are "exceptional control flow", so we handle it
891  * like a normal Jmp, because there is no "regular" one.
892  * That's why Raise is no "fragile_op"!
894   if (is_fragile_op(prev_cf_op)) {
895     if (is_Proj(cf_pred)) {
896       if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
897         /* the regular control flow, NO exception */
900       assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
903     /* Hmm, exception but not a Proj? */
904     assert(!"unexpected condition: fragile op without a proj");
908 }  /* is_exception_flow */
/* NOTE(review): damaged paste — fused line numbers; the return type,
   `i` declaration, several closing braces/else lines, the
   `phi0_all = NULL;` reset and the final return are missing from this
   view.  Code kept verbatim; only comments edited. */
911  * Computes the predecessors for the real phi node, and then
912  * allocates and returns this node.  The routine called to allocate the
913  * node might optimize it away and return a real value.
914  * This function must be called with an in-array of proper size.
917 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
918   ir_node *prevBlock, *res, *phi0, *phi0_all;
921   /* If this block has no value at pos create a Phi0 and remember it
922      in graph_arr to break recursions.
923      Else we may not set graph_arr as there a later value is remembered. */
925   if (block->attr.block.graph_arr[pos] == NULL) {
926     ir_graph *irg = current_ir_graph;
928     if (block == get_irg_start_block(irg)) {
929       /* Collapsing to Bad tarvals is no good idea.
930          So we call a user-supplied routine here that deals with this case as
931          appropriate for the given language. Sorrily the only help we can give
932          here is the position.
934          Even if all variables are defined before use, it can happen that
935          we get to the start block, if a Cond has been replaced by a tuple
936          (bad, jmp). In this case we call the function needlessly, eventually
937          generating a non-existent error.
938          However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
941       if (default_initialize_local_variable != NULL) {
942         ir_node *rem = get_cur_block();
944         set_cur_block(block);
   /* pos - 1: the callback counts variables from 0, graph_arr from 1. */
945         block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
949         block->attr.block.graph_arr[pos] = new_Unknown(mode);
950       /* We don't need to care about exception ops in the start block.
951          There are none by definition. */
952       return block->attr.block.graph_arr[pos];
954       phi0 = new_rd_Phi0(irg, block, mode);
955       block->attr.block.graph_arr[pos] = phi0;
956       if (get_opt_precise_exc_context()) {
957         /* Set graph_arr for fragile ops.  Also here we should break recursion.
958            We could choose a cyclic path through a cfop.  But the recursion would
959            break at some point. */
960         set_frag_value(block->attr.block.graph_arr, pos, phi0);
965   /* This loop goes to all predecessor blocks of the block the Phi node
966      is in and there finds the operands of the Phi node by calling
967      get_r_value_internal. */
968   for (i = 1; i <= ins; ++i) {
969     ir_node *cf_pred = block->in[i];
970     ir_node *prevCfOp = skip_Proj(cf_pred);
972     if (is_Bad(prevCfOp)) {
973       /* In case a Cond has been optimized we would get right to the start block
974          with an invalid definition. */
975       nin[i-1] = new_Bad();
978     prevBlock = prevCfOp->in[0];  /* go past control flow op to prev block */
980     if (!is_Bad(prevBlock)) {
981       if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
982         assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
983         nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
985         nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
987       nin[i-1] = new_Bad();
991   /* We want to pass the Phi0 node to the constructor: this finds additional
992      optimization possibilities.
993      The Phi0 node either is allocated in this function, or it comes from
994      a former call to get_r_value_internal().  In this case we may not yet
995      exchange phi0, as this is done in mature_immBlock(). */
997   phi0_all = block->attr.block.graph_arr[pos];
998   if (! is_Phi0(phi0_all)             ||
999       get_irn_arity(phi0_all) != 0    ||
1000       get_nodes_block(phi0_all) != block)
1006   /* After collecting all predecessors into the array nin a new Phi node
1007      with these predecessors is created.  This constructor contains an
1008      optimization: If all predecessors of the Phi node are identical it
1009      returns the only operand instead of a new Phi node.  */
1010   res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1012   /* In case we allocated a Phi0 node at the beginning of this procedure,
1013      we need to exchange this Phi0 with the real Phi. */
1015     exchange(phi0, res);
1016     block->attr.block.graph_arr[pos] = res;
1017     /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
1018        only an optimization. */
/* NOTE(review): damaged paste — fused line numbers; the return type,
   `res`/`nin` declarations, the early "case 2" return, the closing
   brace of the if/else and the final return are missing from this
   view.  Code kept verbatim. */
1025  * This function returns the last definition of a value.  In case
1026  * this value was last defined in a previous block, Phi nodes are
1027  * inserted.  If the part of the firm graph containing the definition
1028  * is not yet constructed, a dummy Phi node is returned.
1030  * @param block   the current block
1031  * @param pos     the value number of the value searched
1032  * @param mode    the mode of this value (needed for Phi construction)
1035 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
1037   /* There are 4 cases to treat.
1039      1. The block is not mature and we visit it the first time.  We can not
1040         create a proper Phi node, therefore a Phi0, i.e., a Phi without
1041         predecessors is returned.  This node is added to the linked list (block
1042         attribute "phis") of the containing block to be completed when this block is
1043         matured.  (Completion will add a new Phi and turn the Phi0 into an Id
1046      2. The value is already known in this block, graph_arr[pos] is set and we
1047         visit the block the first time.  We can return the value without
1048         creating any new nodes.
1050      3. The block is mature and we visit it the first time.  A Phi node needs
1051         to be created (phi_merge).  If the Phi is not needed, as all it's
1052         operands are the same value reaching the block through different
1053         paths, it's optimized away and the value itself is returned.
1055      4. The block is mature, and we visit it the second time.  Now two
1056         subcases are possible:
1057         * The value was computed completely the last time we were here.  This
1058           is the case if there is no loop.  We can return the proper value.
1059         * The recursion that visited this node and set the flag did not
1060           return yet.  We are computing a value in a loop and need to
1061           break the recursion.  This case only happens if we visited
1062           the same block with phi_merge before, which inserted a Phi0.
1063           So we return the Phi0.
1066   /* case 4 -- already visited. */
1067   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1068     /* As phi_merge allocates a Phi0 this value is always defined.  Here
1069        is the critical difference of the two algorithms. */
1070     assert(block->attr.block.graph_arr[pos]);
1071     return block->attr.block.graph_arr[pos];
1074   /* visited the first time */
1075   set_irn_visited(block, get_irg_visited(current_ir_graph));
1077   /* Get the local valid value */
1078   res = block->attr.block.graph_arr[pos];
1080   /* case 2 -- If the value is actually computed, return it. */
1084   if (block->attr.block.is_matured) {  /* case 3 */
1086     /* The Phi has the same amount of ins as the corresponding block. */
1087     int ins = get_irn_arity(block);
1089     NEW_ARR_A(ir_node *, nin, ins);
1091     /* Phi merge collects the predecessors and then creates a node. */
1092     res = phi_merge(block, pos, mode, nin, ins);
1094   } else {  /* case 1 */
1095     /* The block is not mature, we don't know how many in's are needed.  A Phi
1096        with zero predecessors is created.  Such a Phi node is called Phi0
1097        node.  The Phi0 is then added to the list of Phi0 nodes in this block
1098        to be matured by mature_immBlock later.
1099        The Phi0 has to remember the pos of it's internal value.  If the real
1100        Phi is computed, pos is used to update the array with the local
1102     res = new_rd_Phi0(current_ir_graph, block, mode);
1103     res->attr.phi.u.pos = pos;
1104     res->attr.phi.next = block->attr.block.phis;
1105     block->attr.block.phis = res;
1108   assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1110   /* The local valid value is available now. */
1111   block->attr.block.graph_arr[pos] = res;
1114 }  /* get_r_value_internal */
1116 /* ************************************************************************** */
/* NOTE(review): damaged paste — fused line numbers; the return type,
   declarations of `ins`/`nin`/`n`/`next` and several closing braces
   are missing from this view.  Code kept verbatim. */
1119  * Finalize a Block node, when all control flows are known.
1120  * Acceptable parameters are only Block nodes.
1123 mature_immBlock(ir_node *block) {
1128   assert(is_Block(block));
1129   if (!get_Block_matured(block)) {
1130     ir_graph *irg = current_ir_graph;
   /* block->in[0] is the block's own slot, so arity is length - 1. */
1132     ins = ARR_LEN(block->in) - 1;
1133     /* Fix block parameters */
1134     block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1136     /* An array for building the Phi nodes. */
1137     NEW_ARR_A(ir_node *, nin, ins);
1139     /* Traverse a chain of Phi nodes attached to this block and mature
   /* `next` is read before exchange() because exchange invalidates n. */
1141     for (n = block->attr.block.phis; n; n = next) {
1142       inc_irg_visited(irg);
1143       next = n->attr.phi.next;
1144       exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1147     block->attr.block.is_matured = 1;
1149     /* Now, as the block is a finished Firm node, we can optimize it.
1150        Since other nodes have been allocated since the block was created
1151        we can not free the node on the obstack.  Therefore we have to call
1152        optimize_in_place().
1153        Unfortunately the optimization does not change a lot, as all allocated
1154        nodes refer to the unoptimized node.
1155        We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1156     block = optimize_in_place_2(block);
1157     IRN_VRFY_IRG(block, irg);
1159 }  /* mature_immBlock */
1162 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
1163 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1167 new_d_Const(dbg_info *db, tarval *con) {
1168 return new_bd_Const(db, con);
1172 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
1173 return new_bd_Const_long(db, mode, value);
1174 } /* new_d_Const_long */
1177 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
1178 return new_bd_Const_type(db, con, tp);
1179 } /* new_d_Const_type */
1183 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
1185 assert(arg->op == op_Cond);
1186 arg->attr.cond.default_proj = max_proj;
1187 res = new_d_Proj(db, arg, mode_X, max_proj);
1189 } /* new_d_defaultProj */
1192 * Allocate a frag array for a node if the current graph state is phase_building.
1194 * @param irn the node for which the frag array should be allocated
1195 * @param op the opcode of the (original) node, if does not match opcode of irn,
1197 * @param frag_store the address of the frag store in irn attributes, if this
1198 * address contains a value != NULL, does nothing
1200 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
1201 if (get_opt_precise_exc_context()) {
1202 if ((current_ir_graph->phase_state == phase_building) &&
1203 (get_irn_op(irn) == op) && /* Could be optimized away. */
1204 !*frag_store) /* Could be a cse where the arr is already set. */ {
1205 *frag_store = new_frag_arr(irn);
1208 } /* firm_alloc_frag_arr */
1211 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1212 /* GL: objptr was called frame before. Frame was a bad choice for the name
1213 as the operand could as well be a pointer to a dynamic object. */
1215 return new_bd_Sel(db, current_ir_graph->current_block,
1216 store, objptr, 0, NULL, ent);
1217 } /* new_d_simpleSel */
1220 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
1221 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1223 } /* new_d_SymConst_type */
1226 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
1227 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1228 value, kind, firm_unknown_type);
1229 } /* new_d_SymConst */
1232 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
1233 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1237 new_d_EndReg(dbg_info *db) {
1238 return new_bd_EndReg(db, current_ir_graph->current_block);
1239 } /* new_d_EndReg */
1242 new_d_EndExcept(dbg_info *db) {
1243 return new_bd_EndExcept(db, current_ir_graph->current_block);
1244 } /* new_d_EndExcept */
1248 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1249 int n_outs, ir_asm_constraint *outputs,
1250 int n_clobber, ident *clobber[], ident *asm_text) {
1251 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1254 /* ********************************************************************* */
1255 /* Comfortable interface with automatic Phi node construction. */
1256 /* (Uses also constructors of ?? interface, except new_Block.) */
1257 /* ********************************************************************* */
1259 /* Block construction */
1260 /* immature Block without predecessors */
1262 new_d_immBlock(dbg_info *db) {
1265 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1266 /* creates a new dynamic in-array as length of in is -1 */
1267 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1269 /* macroblock head */
1272 res->attr.block.is_matured = 0;
1273 res->attr.block.is_dead = 0;
1274 res->attr.block.is_mb_head = 1;
1275 res->attr.block.irg.irg = current_ir_graph;
1276 res->attr.block.backedge = NULL;
1277 res->attr.block.in_cg = NULL;
1278 res->attr.block.cg_backedge = NULL;
1279 res->attr.block.extblk = NULL;
1280 res->attr.block.region = NULL;
1281 res->attr.block.mb_depth = 0;
1282 res->attr.block.entity = NULL;
1284 set_Block_block_visited(res, 0);
1286 /* Create and initialize array for Phi-node construction. */
1287 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1288 current_ir_graph->n_loc);
1289 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1291 /* Immature block may not be optimized! */
1292 IRN_VRFY_IRG(res, current_ir_graph);
1295 } /* new_d_immBlock */
1298 new_immBlock(void) {
1299 return new_d_immBlock(NULL);
1300 } /* new_immBlock */
1302 /* immature PartBlock with its predecessors */
1304 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
1305 ir_node *res = new_d_immBlock(db);
1306 ir_node *blk = get_nodes_block(pred_jmp);
1308 res->in[0] = blk->in[0];
1309 assert(res->in[0] != NULL);
1310 add_immBlock_pred(res, pred_jmp);
1312 res->attr.block.is_mb_head = 0;
1313 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1316 } /* new_d_immPartBlock */
1319 new_immPartBlock(ir_node *pred_jmp) {
1320 return new_d_immPartBlock(NULL, pred_jmp);
1321 } /* new_immPartBlock */
1323 /* add an edge to a jmp/control flow node */
1325 add_immBlock_pred(ir_node *block, ir_node *jmp) {
1326 int n = ARR_LEN(block->in) - 1;
1328 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1329 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1330 assert(is_ir_node(jmp));
1332 ARR_APP1(ir_node *, block->in, jmp);
1334 hook_set_irn_n(block, n, jmp, NULL);
1335 } /* add_immBlock_pred */
1337 /* changing the current block */
1339 set_cur_block(ir_node *target) {
1340 current_ir_graph->current_block = target;
1341 } /* set_cur_block */
1343 /* ************************ */
1344 /* parameter administration */
1346 /* get a value from the parameter array from the current block by its index */
1348 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
1349 ir_graph *irg = current_ir_graph;
1350 assert(get_irg_phase_state(irg) == phase_building);
1351 inc_irg_visited(irg);
1356 return get_r_value_internal(irg->current_block, pos + 1, mode);
1359 /* get a value from the parameter array from the current block by its index */
1361 get_value(int pos, ir_mode *mode) {
1362 return get_d_value(NULL, pos, mode);
1365 /* set a value at position pos in the parameter array from the current block */
1367 set_value(int pos, ir_node *value) {
1368 ir_graph *irg = current_ir_graph;
1369 assert(get_irg_phase_state(irg) == phase_building);
1371 assert(pos+1 < irg->n_loc);
1372 assert(is_ir_node(value));
1373 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1376 /* Find the value number for a node in the current block.*/
1378 find_value(ir_node *value) {
1380 ir_node *bl = current_ir_graph->current_block;
1382 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1383 if (bl->attr.block.graph_arr[i] == value)
1388 /* get the current store */
1391 ir_graph *irg = current_ir_graph;
1393 assert(get_irg_phase_state(irg) == phase_building);
1394 /* GL: one could call get_value instead */
1395 inc_irg_visited(irg);
1396 return get_r_value_internal(irg->current_block, 0, mode_M);
1399 /* set the current store: handles automatic Sync construction for Load nodes */
1401 set_store(ir_node *store) {
1402 ir_node *load, *pload, *pred, *in[2];
1404 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1405 /* Beware: due to dead code elimination, a store might become a Bad node even in
1406 the construction phase. */
1407 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1409 if (get_opt_auto_create_sync()) {
1410 /* handle non-volatile Load nodes by automatically creating Sync's */
1411 load = skip_Proj(store);
1412 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1413 pred = get_Load_mem(load);
1415 if (is_Sync(pred)) {
1416 /* a Load after a Sync: move it up */
1417 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1419 set_Load_mem(load, get_memop_mem(mem));
1420 add_Sync_pred(pred, store);
1423 pload = skip_Proj(pred);
1424 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1425 /* a Load after a Load: create a new Sync */
1426 set_Load_mem(load, get_Load_mem(pload));
1430 store = new_Sync(2, in);
1435 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1439 keep_alive(ir_node *ka) {
1440 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1443 /* --- Useful access routines --- */
1444 /* Returns the current block of the current graph. To set the current
1445 block use set_cur_block. */
1446 ir_node *get_cur_block(void) {
1447 return get_irg_current_block(current_ir_graph);
1448 } /* get_cur_block */
1450 /* Returns the frame type of the current graph */
1451 ir_type *get_cur_frame_type(void) {
1452 return get_irg_frame_type(current_ir_graph);
1453 } /* get_cur_frame_type */
1456 /* ********************************************************************* */
1459 /* call once for each run of the library */
1461 firm_init_cons(uninitialized_local_variable_func_t *func) {
1462 default_initialize_local_variable = func;
1463 } /* firm_init_cons */
1466 irp_finalize_cons(void) {
1468 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1469 irg_finalize_cons(get_irp_irg(i));
1471 irp->phase_state = phase_high;
1472 } /* irp_finalize_cons */
1474 ir_node *new_Start(void) {
1475 return new_d_Start(NULL);
1477 ir_node *new_End(void) {
1478 return new_d_End(NULL);
1480 ir_node *new_Const(tarval *con) {
1481 return new_d_Const(NULL, con);
1484 ir_node *new_Const_long(ir_mode *mode, long value) {
1485 return new_d_Const_long(NULL, mode, value);
1488 ir_node *new_Const_type(tarval *con, ir_type *tp) {
1489 return new_d_Const_type(NULL, con, tp);
1492 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
1493 return new_d_SymConst_type(NULL, mode, value, kind, type);
1495 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
1496 return new_d_SymConst(NULL, mode, value, kind);
1498 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
1499 return new_d_simpleSel(NULL, store, objptr, ent);
1501 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
1502 return new_d_Phi(NULL, arity, in, mode);
1504 ir_node *new_Sync(int arity, ir_node *in[]) {
1505 return new_d_Sync(NULL, arity, in);
1507 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
1508 return new_d_defaultProj(NULL, arg, max_proj);
1510 ir_node *new_Bad(void) {
1511 return get_irg_bad(current_ir_graph);
1513 ir_node *new_EndReg(void) {
1514 return new_d_EndReg(NULL);
1516 ir_node *new_EndExcept(void) {
1517 return new_d_EndExcept(NULL);
1519 ir_node *new_NoMem(void) {
1520 return get_irg_no_mem(current_ir_graph);
1522 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1523 int n_outs, ir_asm_constraint *outputs,
1524 int n_clobber, ident *clobber[], ident *asm_text) {
1525 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1528 /* create a new anchor node */
/* NOTE(review): this definition continues past the end of this excerpt --
   its closing brace is not visible here.  The Anchor collects the
   graph-unique nodes; it belongs to no block (block argument is NULL) and
   its in-array is zero-initialized with one slot per anchor index. */
1529 ir_node *new_Anchor(ir_graph *irg) {
1530 ir_node *in[anchor_last];
1531 memset(in, 0, sizeof(in));
1532 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);