2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
46 /* when we need verifying */
48 # define IRN_VRFY_IRG(res, irg)
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
58 /* creates a bd constructor for a binop */
59 #define NEW_BD_BINOP(instr) \
61 new_bd_##instr(dbg_info *db, ir_node *block, \
62 ir_node *op1, ir_node *op2, ir_mode *mode) \
66 ir_graph *irg = current_ir_graph; \
69 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
70 res = optimize_node(res); \
71 IRN_VRFY_IRG(res, irg); \
75 /* creates a bd constructor for an unop */
76 #define NEW_BD_UNOP(instr) \
78 new_bd_##instr(dbg_info *db, ir_node *block, \
79 ir_node *op, ir_mode *mode) \
82 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for a divop */
90 #define NEW_BD_DIVOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
97 ir_graph *irg = current_ir_graph; \
101 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
102 res->attr.divmod.exc.pin_state = state; \
103 res->attr.divmod.resmode = mode; \
104 res->attr.divmod.no_remainder = 0; \
105 res = optimize_node(res); \
106 IRN_VRFY_IRG(res, irg); \
110 /* creates a rd constructor for a binop */
111 #define NEW_RD_BINOP(instr) \
113 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
114 ir_node *op1, ir_node *op2, ir_mode *mode) \
117 ir_graph *rem = current_ir_graph; \
118 current_ir_graph = irg; \
119 res = new_bd_##instr(db, block, op1, op2, mode); \
120 current_ir_graph = rem; \
124 /* creates a rd constructor for an unop */
125 #define NEW_RD_UNOP(instr) \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
128 ir_node *op, ir_mode *mode) \
131 ir_graph *rem = current_ir_graph; \
132 current_ir_graph = irg; \
133 res = new_bd_##instr(db, block, op, mode); \
134 current_ir_graph = rem; \
138 /* creates a rd constructor for a divop */
139 #define NEW_RD_DIVOP(instr) \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
142 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
145 ir_graph *rem = current_ir_graph; \
146 current_ir_graph = irg; \
147 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
148 current_ir_graph = rem; \
152 /* creates a d constructor for a binop */
153 #define NEW_D_BINOP(instr) \
155 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
156 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
159 /* creates a d constructor for an unop */
160 #define NEW_D_UNOP(instr) \
162 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
163 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
166 #include "gen_ir_cons.c.inl"
169 new_bd_Start(dbg_info *db, ir_node *block) {
171 ir_graph *irg = current_ir_graph;
173 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175 IRN_VRFY_IRG(res, irg);
180 new_bd_End(dbg_info *db, ir_node *block) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186 IRN_VRFY_IRG(res, irg);
191 * Creates a Phi node with all predecessors. Calling this constructor
192 * is only allowed if the corresponding block is mature.
195 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
197 ir_graph *irg = current_ir_graph;
201 /* Don't assert that block matured: the use of this constructor is strongly
203 if (get_Block_matured(block))
204 assert(get_irn_arity(block) == arity);
206 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
208 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
210 for (i = arity - 1; i >= 0; --i)
211 if (is_Unknown(in[i])) {
216 if (!has_unknown) res = optimize_node(res);
217 IRN_VRFY_IRG(res, irg);
219 /* Memory Phis in endless loops must be kept alive.
220 As we can't distinguish these easily we keep all of them alive. */
221 if (is_Phi(res) && mode == mode_M)
222 add_End_keepalive(get_irg_end(irg), res);
227 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
229 ir_graph *irg = current_ir_graph;
231 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232 res->attr.con.tv = con;
233 set_Const_type(res, tp); /* Call method because of complex assertion. */
234 res = optimize_node (res);
235 assert(get_Const_type(res) == tp);
236 IRN_VRFY_IRG(res, irg);
239 } /* new_bd_Const_type */
242 new_bd_Const(dbg_info *db, tarval *con) {
243 ir_graph *irg = current_ir_graph;
245 return new_rd_Const_type (db, irg, con, firm_unknown_type);
249 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
250 ir_graph *irg = current_ir_graph;
252 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 } /* new_bd_Const_long */
256 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
259 ir_graph *irg = current_ir_graph;
261 assert(arg->op == op_Cond);
262 arg->attr.cond.kind = fragmentary;
263 arg->attr.cond.default_proj = max_proj;
264 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
266 } /* new_bd_defaultProj */
269 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
270 int arity, ir_node **in, ir_entity *ent) {
274 ir_graph *irg = current_ir_graph;
275 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
277 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
280 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
283 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
285 * Sel's can select functions which should be of mode mode_P_code.
287 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
288 res->attr.sel.entity = ent;
289 res = optimize_node(res);
290 IRN_VRFY_IRG(res, irg);
295 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
296 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
310 new_bd_Sync(dbg_info *db, ir_node *block) {
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
322 new_bd_EndReg(dbg_info *db, ir_node *block) {
324 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
327 set_irg_end_reg(irg, res);
328 IRN_VRFY_IRG(res, irg);
330 } /* new_bd_EndReg */
333 new_bd_EndExcept(dbg_info *db, ir_node *block) {
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
338 set_irg_end_except(irg, res);
339 IRN_VRFY_IRG (res, irg);
341 } /* new_bd_EndExcept */
344 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
345 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
350 res->attr.assem.pin_state = op_pin_state_pinned;
351 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
352 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
353 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
354 res->attr.assem.asm_text = asm_text;
356 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
357 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
358 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
360 res = optimize_node(res);
361 IRN_VRFY_IRG(res, irg);
365 /* --------------------------------------------- */
366 /* private interfaces, for professional use only */
367 /* --------------------------------------------- */
370 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
371 ir_graph *rem = current_ir_graph;
374 current_ir_graph = irg;
375 res = new_bd_Start(db, block);
376 current_ir_graph = rem;
382 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
384 ir_graph *rem = current_ir_graph;
386 current_ir_graph = irg;
387 res = new_bd_End(db, block);
388 current_ir_graph = rem;
393 /* Creates a Phi node with all predecessors. Calling this constructor
394 is only allowed if the corresponding block is mature. */
396 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
398 ir_graph *rem = current_ir_graph;
400 current_ir_graph = irg;
401 res = new_bd_Phi(db, block,arity, in, mode);
402 current_ir_graph = rem;
408 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
410 ir_graph *rem = current_ir_graph;
412 current_ir_graph = irg;
413 res = new_bd_Const_type(db, con, tp);
414 current_ir_graph = rem;
417 } /* new_rd_Const_type */
420 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
422 //#ifdef USE_ORIGINAL
423 ir_graph *rem = current_ir_graph;
425 current_ir_graph = irg;
426 res = new_bd_Const_type(db, con, firm_unknown_type);
427 current_ir_graph = rem;
429 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
436 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
437 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
438 } /* new_rd_Const_long */
441 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
444 ir_graph *rem = current_ir_graph;
446 current_ir_graph = irg;
447 res = new_bd_defaultProj(db, block, arg, max_proj);
448 current_ir_graph = rem;
451 } /* new_rd_defaultProj */
454 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
455 ir_node *store, ir_node *objptr, ir_entity *ent) {
457 ir_graph *rem = current_ir_graph;
459 current_ir_graph = irg;
460 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
461 current_ir_graph = rem;
464 } /* new_rd_simpleSel */
467 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
468 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
470 ir_graph *rem = current_ir_graph;
472 current_ir_graph = irg;
473 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
474 current_ir_graph = rem;
477 } /* new_rd_SymConst_type */
480 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
481 symconst_symbol value, symconst_kind symkind) {
482 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
483 } /* new_rd_SymConst */
485 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
487 sym.entity_p = symbol;
488 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
489 } /* new_rd_SymConst_addr_ent */
491 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
493 sym.entity_p = symbol;
494 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
495 } /* new_rd_SymConst_ofs_ent */
497 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
499 sym.ident_p = symbol;
500 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
501 } /* new_rd_SymConst_addr_name */
503 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
506 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
507 } /* new_rd_SymConst_type_tag */
509 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
512 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
513 } /* new_rd_SymConst_size */
515 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
518 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
519 } /* new_rd_SymConst_align */
522 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
524 ir_graph *rem = current_ir_graph;
527 current_ir_graph = irg;
528 res = new_bd_Sync(db, block);
529 current_ir_graph = rem;
531 for (i = 0; i < arity; ++i)
532 add_Sync_pred(res, in[i]);
538 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
541 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
542 set_irg_end_reg(irg, res);
543 IRN_VRFY_IRG(res, irg);
545 } /* new_rd_EndReg */
548 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
551 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
552 set_irg_end_except(irg, res);
553 IRN_VRFY_IRG (res, irg);
555 } /* new_rd_EndExcept */
557 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
558 int arity, ir_node *in[], ir_asm_constraint *inputs,
559 int n_outs, ir_asm_constraint *outputs,
560 int n_clobber, ident *clobber[], ident *asm_text) {
562 ir_graph *rem = current_ir_graph;
564 current_ir_graph = irg;
565 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
566 current_ir_graph = rem;
571 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
572 return new_rd_Start(NULL, irg, block);
574 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
575 return new_rd_End(NULL, irg, block);
577 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
578 return new_rd_Const(NULL, irg, con);
580 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
581 return new_rd_Const_long(NULL, irg, mode, value);
583 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
584 return new_rd_Const_type(NULL, irg, con, tp);
586 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
587 symconst_symbol value, symconst_kind symkind) {
588 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
590 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
591 ir_node *objptr, ir_entity *ent) {
592 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
594 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
595 ir_node **in, ir_mode *mode) {
596 return new_rd_Phi(NULL, irg, block, arity, in, mode);
598 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
599 return new_rd_Sync(NULL, irg, block, arity, in);
601 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
603 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
605 ir_node *new_r_Bad(ir_graph *irg) {
606 return get_irg_bad(irg);
608 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
609 return new_rd_EndReg(NULL, irg, block);
611 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
612 return new_rd_EndExcept(NULL, irg, block);
614 ir_node *new_r_NoMem(ir_graph *irg) {
615 return get_irg_no_mem(irg);
617 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
618 int arity, ir_node *in[], ir_asm_constraint *inputs,
619 int n_outs, ir_asm_constraint *outputs,
620 int n_clobber, ident *clobber[], ident *asm_text) {
621 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
624 /** ********************/
625 /** public interfaces */
626 /** construction tools */
629 new_d_Start(dbg_info *db) {
632 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
633 op_Start, mode_T, 0, NULL);
635 res = optimize_node(res);
636 IRN_VRFY_IRG(res, current_ir_graph);
641 new_d_End(dbg_info *db) {
643 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
644 op_End, mode_X, -1, NULL);
645 res = optimize_node(res);
646 IRN_VRFY_IRG(res, current_ir_graph);
651 /* ***********************************************************************/
652 /* Methods necessary for automatic Phi node creation */
654 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
655 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
656 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
657 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
659 Call Graph: ( A ---> B == A "calls" B)
661 get_value mature_immBlock
669 get_r_value_internal |
673 new_rd_Phi0 new_rd_Phi_in
675 * *************************************************************************** */
677 /** Creates a Phi node with 0 predecessors. */
678 static inline ir_node *
679 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
682 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
683 IRN_VRFY_IRG(res, irg);
689 * Internal constructor of a Phi node by a phi_merge operation.
691 * @param irg the graph on which the Phi will be constructed
692 * @param block the block in which the Phi will be constructed
693 * @param mode the mode of the Phi node
694 * @param in the input array of the phi node
695 * @param ins number of elements in the input array
696 * @param phi0 if non-NULL: the Phi0 node in the same block that represents
697 * the value for which the new Phi is constructed
699 static inline ir_node *
700 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
701 ir_node **in, int ins, ir_node *phi0) {
703 ir_node *res, *known;
705 /* Allocate a new node on the obstack. The allocation copies the in
707 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
708 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
710 /* This loop checks whether the Phi has more than one predecessor.
711 If so, it is a real Phi node and we break the loop. Else the
712 Phi node merges the same definition on several paths and therefore
714 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
716 for (i = ins - 1; i >= 0; --i) {
719 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
721 /* Optimize self referencing Phis: We can't detect them yet properly, as
722 they still refer to the Phi0 they will replace. So replace right now. */
723 if (phi0 && in[i] == phi0)
726 if (in[i] == res || in[i] == known)
735 /* i < 0: there is at most one predecessor, we don't need a phi node. */
738 edges_node_deleted(res, current_ir_graph);
739 obstack_free(current_ir_graph->obst, res);
741 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
742 order, an enclosing Phi node may become superfluous. */
743 res = optimize_in_place_2(known);
745 exchange(known, res);
750 /* A undefined value, e.g., in unreachable code. */
754 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
755 IRN_VRFY_IRG(res, irg);
756 /* Memory Phis in endless loops must be kept alive.
757 As we can't distinguish these easily we keep all of them alive. */
758 if (is_Phi(res) && mode == mode_M)
759 add_End_keepalive(get_irg_end(irg), res);
763 } /* new_rd_Phi_in */
766 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
769 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
772 * Construct a new frag_array for node n.
773 * Copy the content from the current graph_arr of the corresponding block:
774 * this is the current state.
775 * Set ProjM(n) as current memory state.
776 * Further the last entry in frag_arr of current block points to n. This
777 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
779 static inline ir_node **new_frag_arr(ir_node *n) {
783 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
784 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
785 sizeof(ir_node *)*current_ir_graph->n_loc);
787 /* turn off optimization before allocating Proj nodes, as res isn't
789 opt = get_opt_optimize(); set_optimize(0);
790 /* Here we rely on the fact that all frag ops have Memory as first result! */
792 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
793 } else if (is_CopyB(n)) {
794 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
796 assert((pn_Quot_M == pn_DivMod_M) &&
797 (pn_Quot_M == pn_Div_M) &&
798 (pn_Quot_M == pn_Mod_M) &&
799 (pn_Quot_M == pn_Load_M) &&
800 (pn_Quot_M == pn_Store_M) &&
801 (pn_Quot_M == pn_Alloc_M) &&
802 (pn_Quot_M == pn_Bound_M));
803 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
807 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
812 * Returns the frag_arr from a node.
814 static inline ir_node **get_frag_arr(ir_node *n) {
815 switch (get_irn_opcode(n)) {
817 return n->attr.call.exc.frag_arr;
819 return n->attr.alloc.exc.frag_arr;
821 return n->attr.load.exc.frag_arr;
823 return n->attr.store.exc.frag_arr;
825 return n->attr.except.frag_arr;
830 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
834 for (i = 1024; i >= 0; --i)
839 if (frag_arr[pos] == NULL)
841 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
842 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
843 assert(arr != frag_arr && "Endless recursion detected");
848 assert(!"potential endless recursion in set_frag_value");
849 } /* set_frag_value */
852 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
856 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
858 frag_arr = get_frag_arr(cfOp);
861 if (block->attr.block.graph_arr[pos] != NULL) {
862 /* There was a set_value() after the cfOp and no get_value() before that
863 set_value(). We must build a Phi node now. */
864 if (block->attr.block.is_matured) {
865 int ins = get_irn_arity(block);
867 NEW_ARR_A(ir_node *, nin, ins);
868 res = phi_merge(block, pos, mode, nin, ins);
870 res = new_rd_Phi0(current_ir_graph, block, mode);
871 res->attr.phi.u.pos = pos;
872 res->attr.phi.next = block->attr.block.phis;
873 block->attr.block.phis = res;
876 /* It's a Phi, we can write this into all graph_arrs with NULL */
877 set_frag_value(block->attr.block.graph_arr, pos, res);
879 res = get_r_value_internal(block, pos, mode);
880 set_frag_value(block->attr.block.graph_arr, pos, res);
884 } /* get_r_frag_value_internal */
887 * Check whether a control flow node cf_pred represents an exception flow.
889 * @param cf_pred the control flow node
890 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
892 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
894 * Note: all projections from a raise are "exceptional control flow" so we handle it
895 * like a normal Jmp, because there is no "regular" one.
896 * That's why Raise is no "fragile_op"!
898 if (is_fragile_op(prev_cf_op)) {
899 if (is_Proj(cf_pred)) {
900 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
901 /* the regular control flow, NO exception */
904 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
907 /* Hmm, exception but not a Proj? */
908 assert(!"unexpected condition: fragile op without a proj");
912 } /* is_exception_flow */
915 * Computes the predecessors for the real phi node, and then
916 * allocates and returns this node. The routine called to allocate the
917 * node might optimize it away and return a real value.
918 * This function must be called with an in-array of proper size.
921 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
922 ir_node *prevBlock, *res, *phi0, *phi0_all;
925 /* If this block has no value at pos create a Phi0 and remember it
926 in graph_arr to break recursions.
927 Else we may not set graph_arr, as a later value is remembered there. */
929 if (block->attr.block.graph_arr[pos] == NULL) {
930 ir_graph *irg = current_ir_graph;
932 if (block == get_irg_start_block(irg)) {
933 /* Collapsing to Bad tarvals is no good idea.
934 So we call a user-supplied routine here that deals with this case as
935 appropriate for the given language. Unfortunately the only help we can give
936 here is the position.
938 Even if all variables are defined before use, it can happen that
939 we get to the start block, if a Cond has been replaced by a tuple
940 (bad, jmp). In this case we call the function needlessly, eventually
941 generating a non-existent error.
942 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
945 if (default_initialize_local_variable != NULL) {
946 ir_node *rem = get_cur_block();
948 set_cur_block(block);
949 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
953 block->attr.block.graph_arr[pos] = new_Unknown(mode);
954 /* We don't need to care about exception ops in the start block.
955 There are none by definition. */
956 return block->attr.block.graph_arr[pos];
958 phi0 = new_rd_Phi0(irg, block, mode);
959 block->attr.block.graph_arr[pos] = phi0;
960 if (get_opt_precise_exc_context()) {
961 /* Set graph_arr for fragile ops. Also here we should break recursion.
962 We could choose a cyclic path through a cfop. But the recursion would
963 break at some point. */
964 set_frag_value(block->attr.block.graph_arr, pos, phi0);
969 /* This loop goes to all predecessor blocks of the block the Phi node
970 is in and there finds the operands of the Phi node by calling
971 get_r_value_internal. */
972 for (i = 1; i <= ins; ++i) {
973 ir_node *cf_pred = block->in[i];
974 ir_node *prevCfOp = skip_Proj(cf_pred);
976 if (is_Bad(prevCfOp)) {
977 /* In case a Cond has been optimized we would get right to the start block
978 with an invalid definition. */
979 nin[i-1] = new_Bad();
982 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
984 if (!is_Bad(prevBlock)) {
985 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
986 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
987 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
989 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
991 nin[i-1] = new_Bad();
995 /* We want to pass the Phi0 node to the constructor: this finds additional
996 optimization possibilities.
997 The Phi0 node either is allocated in this function, or it comes from
998 a former call to get_r_value_internal(). In this case we may not yet
999 exchange phi0, as this is done in mature_immBlock(). */
1001 phi0_all = block->attr.block.graph_arr[pos];
1002 if (! is_Phi0(phi0_all) ||
1003 get_irn_arity(phi0_all) != 0 ||
1004 get_nodes_block(phi0_all) != block)
1010 /* After collecting all predecessors into the array nin a new Phi node
1011 with these predecessors is created. This constructor contains an
1012 optimization: If all predecessors of the Phi node are identical it
1013 returns the only operand instead of a new Phi node. */
1014 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1016 /* In case we allocated a Phi0 node at the beginning of this procedure,
1017 we need to exchange this Phi0 with the real Phi. */
1019 exchange(phi0, res);
1020 block->attr.block.graph_arr[pos] = res;
1021 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1022 only an optimization. */
1029 * This function returns the last definition of a value. In case
1030 * this value was last defined in a previous block, Phi nodes are
1031 * inserted. If the part of the firm graph containing the definition
1032 * is not yet constructed, a dummy Phi node is returned.
1034 * @param block the current block
1035 * @param pos the value number of the value searched
1036 * @param mode the mode of this value (needed for Phi construction)
1039 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
1041 /* There are 4 cases to treat.
1043 1. The block is not mature and we visit it the first time. We can not
1044 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1045 predecessors is returned. This node is added to the linked list (block
1046 attribute "phis") of the containing block to be completed when this block is
1047 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1050 2. The value is already known in this block, graph_arr[pos] is set and we
1051 visit the block the first time. We can return the value without
1052 creating any new nodes.
1054 3. The block is mature and we visit it the first time. A Phi node needs
1055 to be created (phi_merge). If the Phi is not needed, as all its
1056 operands are the same value reaching the block through different
1057 paths, it's optimized away and the value itself is returned.
1059 4. The block is mature, and we visit it the second time. Now two
1060 subcases are possible:
1061 * The value was computed completely the last time we were here. This
1062 is the case if there is no loop. We can return the proper value.
1063 * The recursion that visited this node and set the flag did not
1064 return yet. We are computing a value in a loop and need to
1065 break the recursion. This case only happens if we visited
1066 the same block with phi_merge before, which inserted a Phi0.
1067 So we return the Phi0.
1070 /* case 4 -- already visited. */
1071 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1072 /* As phi_merge allocates a Phi0 this value is always defined. Here
1073 is the critical difference of the two algorithms. */
1074 assert(block->attr.block.graph_arr[pos]);
1075 return block->attr.block.graph_arr[pos];
1078 /* visited the first time */
1079 set_irn_visited(block, get_irg_visited(current_ir_graph));
1081 /* Get the local valid value */
1082 res = block->attr.block.graph_arr[pos];
1084 /* case 2 -- If the value is actually computed, return it. */
1088 if (block->attr.block.is_matured) { /* case 3 */
1090 /* The Phi has the same amount of ins as the corresponding block. */
1091 int ins = get_irn_arity(block);
1093 NEW_ARR_A(ir_node *, nin, ins);
1095 /* Phi merge collects the predecessors and then creates a node. */
1096 res = phi_merge(block, pos, mode, nin, ins);
1098 } else { /* case 1 */
1099 /* The block is not mature, we don't know how many in's are needed. A Phi
1100 with zero predecessors is created. Such a Phi node is called Phi0
1101 node. The Phi0 is then added to the list of Phi0 nodes in this block
1102 to be matured by mature_immBlock later.
1103 The Phi0 has to remember the pos of its internal value. If the real
1104 Phi is computed, pos is used to update the array with the local
1106 res = new_rd_Phi0(current_ir_graph, block, mode);
1107 res->attr.phi.u.pos = pos;
1108 res->attr.phi.next = block->attr.block.phis;
1109 block->attr.block.phis = res;
1112 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1114 /* The local valid value is available now. */
1115 block->attr.block.graph_arr[pos] = res;
1118 } /* get_r_value_internal */
1120 /* ************************************************************************** */
1123 * Finalize a Block node, when all control flows are known.
1124 * Acceptable parameters are only Block nodes.
/*
 * Finalize (mature) an immature Block: fix its predecessor count and
 * compute the Phi0 nodes that were queued on the block while it was
 * still open.  After this, the block may be optimized.
 * NOTE(review): the local declarations (ins, nin, n, next) are not
 * visible in this excerpt -- presumably declared at function entry.
 */
1127 mature_immBlock(ir_node *block) {
1132 assert(is_Block(block));
/* Only do the work once: an already matured block must not change again. */
1133 if (!get_Block_matured(block)) {
1134 ir_graph *irg = current_ir_graph;
/* in[0] holds the block's own entry, so the real predecessor count is len-1. */
1136 ins = ARR_LEN(block->in) - 1;
1137 /* Fix block parameters */
1138 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1140 /* An array for building the Phi nodes. */
1141 NEW_ARR_A(ir_node *, nin, ins);
1143 /* Traverse a chain of Phi nodes attached to this block and mature
1145 for (n = block->attr.block.phis; n; n = next) {
1146 inc_irg_visited(irg);
/* Read the chain link before exchange() rewires n. */
1147 next = n->attr.phi.next;
1148 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1151 block->attr.block.is_matured = 1;
1153 /* Now, as the block is a finished Firm node, we can optimize it.
1154 Since other nodes have been allocated since the block was created
1155 we can not free the node on the obstack. Therefore we have to call
1156 optimize_in_place().
1157 Unfortunately the optimization does not change a lot, as all allocated
1158 nodes refer to the unoptimized node.
1159 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1160 block = optimize_in_place_2(block);
1161 IRN_VRFY_IRG(block, irg);
1163 } /* mature_immBlock */
1166 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
1167 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1171 new_d_Const(dbg_info *db, tarval *con) {
1172 return new_bd_Const(db, con);
1176 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
1177 return new_bd_Const_long(db, mode, value);
1178 } /* new_d_Const_long */
1181 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
1182 return new_bd_Const_type(db, con, tp);
1183 } /* new_d_Const_type */
1187 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
1189 assert(arg->op == op_Cond);
1190 arg->attr.cond.kind = fragmentary;
1191 arg->attr.cond.default_proj = max_proj;
1192 res = new_d_Proj(db, arg, mode_X, max_proj);
1194 } /* new_d_defaultProj */
1197 * Allocate a frag array for a node if the current graph state is phase_building.
1199 * @param irn the node for which the frag array should be allocated
1200 * @param op the opcode of the (original) node, if does not match opcode of irn,
1202 * @param frag_store the address of the frag store in irn attributes, if this
1203 * address contains a value != NULL, does nothing
1205 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
1206 if (get_opt_precise_exc_context()) {
1207 if ((current_ir_graph->phase_state == phase_building) &&
1208 (get_irn_op(irn) == op) && /* Could be optimized away. */
1209 !*frag_store) /* Could be a cse where the arr is already set. */ {
1210 *frag_store = new_frag_arr(irn);
1213 } /* firm_alloc_frag_arr */
1216 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1217 /* GL: objptr was called frame before. Frame was a bad choice for the name
1218 as the operand could as well be a pointer to a dynamic object. */
1220 return new_bd_Sel(db, current_ir_graph->current_block,
1221 store, objptr, 0, NULL, ent);
1222 } /* new_d_simpleSel */
1225 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
1226 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1228 } /* new_d_SymConst_type */
1231 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
1232 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1233 value, kind, firm_unknown_type);
1234 } /* new_d_SymConst */
1237 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
1238 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
1242 new_d_EndReg(dbg_info *db) {
1243 return new_bd_EndReg(db, current_ir_graph->current_block);
1244 } /* new_d_EndReg */
1247 new_d_EndExcept(dbg_info *db) {
1248 return new_bd_EndExcept(db, current_ir_graph->current_block);
1249 } /* new_d_EndExcept */
1253 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1254 int n_outs, ir_asm_constraint *outputs,
1255 int n_clobber, ident *clobber[], ident *asm_text) {
1256 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1259 /* ********************************************************************* */
1260 /* Comfortable interface with automatic Phi node construction. */
1261 /* (Uses also constructors of ?? interface, except new_Block.) */
1262 /* ********************************************************************* */
1264 /* Block construction */
1265 /* immature Block without predecessors */
/*
 * Creates an immature Block: a block with a still-unknown number of
 * control-flow predecessors.  Predecessors are added later with
 * add_immBlock_pred() and the block is closed by mature_immBlock().
 * NOTE(review): the declaration of res and the final `return res;` are
 * not visible in this excerpt -- confirm against the full source.
 */
1267 new_d_immBlock(dbg_info *db) {
1270 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1271 /* creates a new dynamic in-array as length of in is -1 */
1272 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1274 /* macroblock head */
/* Block attributes start out empty; the block is explicitly not matured. */
1277 res->attr.block.is_matured = 0;
1278 res->attr.block.is_dead = 0;
1279 res->attr.block.is_mb_head = 1;
1280 res->attr.block.irg.irg = current_ir_graph;
1281 res->attr.block.backedge = NULL;
1282 res->attr.block.in_cg = NULL;
1283 res->attr.block.cg_backedge = NULL;
1284 res->attr.block.extblk = NULL;
1285 res->attr.block.region = NULL;
1286 res->attr.block.mb_depth = 0;
1287 res->attr.block.entity = NULL;
1289 set_Block_block_visited(res, 0);
1291 /* Create and initialize array for Phi-node construction. */
1292 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1293 current_ir_graph->n_loc);
/* All n_loc value slots start out undefined (NULL). */
1294 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1296 /* Immature block may not be optimized! */
1297 IRN_VRFY_IRG(res, current_ir_graph);
1300 } /* new_d_immBlock */
1303 new_immBlock(void) {
1304 return new_d_immBlock(NULL);
1305 } /* new_immBlock */
1307 /* immature PartBlock with its predecessors */
1309 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
1310 ir_node *res = new_d_immBlock(db);
1311 ir_node *blk = get_nodes_block(pred_jmp);
1313 res->in[0] = blk->in[0];
1314 assert(res->in[0] != NULL);
1315 add_immBlock_pred(res, pred_jmp);
1317 res->attr.block.is_mb_head = 0;
1318 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1321 } /* new_d_immPartBlock */
1324 new_immPartBlock(ir_node *pred_jmp) {
1325 return new_d_immPartBlock(NULL, pred_jmp);
1326 } /* new_immPartBlock */
1328 /* add an edge to a jmp/control flow node */
1330 add_immBlock_pred(ir_node *block, ir_node *jmp) {
1331 int n = ARR_LEN(block->in) - 1;
1333 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1334 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1335 assert(is_ir_node(jmp));
1337 ARR_APP1(ir_node *, block->in, jmp);
1339 hook_set_irn_n(block, n, jmp, NULL);
1340 } /* add_immBlock_pred */
1342 /* changing the current block */
1344 set_cur_block(ir_node *target) {
1345 current_ir_graph->current_block = target;
1346 } /* set_cur_block */
1348 /* ************************ */
1349 /* parameter administration */
1351 /* get a value from the parameter array from the current block by its index */
1353 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
1354 ir_graph *irg = current_ir_graph;
1355 assert(get_irg_phase_state(irg) == phase_building);
1356 inc_irg_visited(irg);
1361 return get_r_value_internal(irg->current_block, pos + 1, mode);
1364 /* get a value from the parameter array from the current block by its index */
1366 get_value(int pos, ir_mode *mode) {
1367 return get_d_value(NULL, pos, mode);
1370 /* set a value at position pos in the parameter array from the current block */
1372 set_value(int pos, ir_node *value) {
1373 ir_graph *irg = current_ir_graph;
1374 assert(get_irg_phase_state(irg) == phase_building);
1376 assert(pos+1 < irg->n_loc);
1377 assert(is_ir_node(value));
1378 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1381 /* Find the value number for a node in the current block.*/
1383 find_value(ir_node *value) {
1385 ir_node *bl = current_ir_graph->current_block;
1387 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1388 if (bl->attr.block.graph_arr[i] == value)
1393 /* get the current store */
1396 ir_graph *irg = current_ir_graph;
1398 assert(get_irg_phase_state(irg) == phase_building);
1399 /* GL: one could call get_value instead */
1400 inc_irg_visited(irg);
1401 return get_r_value_internal(irg->current_block, 0, mode_M);
1404 /* set the current store: handles automatic Sync construction for Load nodes */
/*
 * Sets the current memory state (slot 0 of the value array).  When
 * auto-sync creation is enabled, consecutive non-volatile Loads are
 * rewired so their memory inputs form a Sync instead of a chain.
 * NOTE(review): the assignments filling in[0]/in[1] before new_Sync are
 * not visible in this excerpt -- confirm against the full source.
 */
1406 set_store(ir_node *store) {
1407 ir_node *load, *pload, *pred, *in[2];
1409 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1410 /* Beware: due to dead code elimination, a store might become a Bad node even in
1411 the construction phase. */
1412 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1414 if (get_opt_auto_create_sync()) {
1415 /* handle non-volatile Load nodes by automatically creating Sync's */
1416 load = skip_Proj(store);
1417 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1418 pred = get_Load_mem(load);
1420 if (is_Sync(pred)) {
1421 /* a Load after a Sync: move it up */
1422 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1424 set_Load_mem(load, get_memop_mem(mem));
1425 add_Sync_pred(pred, store);
1428 pload = skip_Proj(pred);
1429 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1430 /* a Load after a Load: create a new Sync */
1431 set_Load_mem(load, get_Load_mem(pload));
1435 store = new_Sync(2, in);
/* Publish the (possibly rewritten) memory state in slot 0. */
1440 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1444 keep_alive(ir_node *ka) {
1445 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1448 /* --- Useful access routines --- */
1449 /* Returns the current block of the current graph. To set the current
1450 block use set_cur_block. */
1451 ir_node *get_cur_block(void) {
1452 return get_irg_current_block(current_ir_graph);
1453 } /* get_cur_block */
1455 /* Returns the frame type of the current graph */
1456 ir_type *get_cur_frame_type(void) {
1457 return get_irg_frame_type(current_ir_graph);
1458 } /* get_cur_frame_type */
1461 /* ********************************************************************* */
1464 /* call once for each run of the library */
1466 firm_init_cons(uninitialized_local_variable_func_t *func) {
1467 default_initialize_local_variable = func;
1468 } /* firm_init_cons */
1471 irp_finalize_cons(void) {
1473 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1474 irg_finalize_cons(get_irp_irg(i));
1476 irp->phase_state = phase_high;
1477 } /* irp_finalize_cons */
1479 ir_node *new_Start(void) {
1480 return new_d_Start(NULL);
1482 ir_node *new_End(void) {
1483 return new_d_End(NULL);
1485 ir_node *new_Const(tarval *con) {
1486 return new_d_Const(NULL, con);
1489 ir_node *new_Const_long(ir_mode *mode, long value) {
1490 return new_d_Const_long(NULL, mode, value);
1493 ir_node *new_Const_type(tarval *con, ir_type *tp) {
1494 return new_d_Const_type(NULL, con, tp);
1497 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
1498 return new_d_SymConst_type(NULL, mode, value, kind, type);
1500 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
1501 return new_d_SymConst(NULL, mode, value, kind);
1503 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
1504 return new_d_simpleSel(NULL, store, objptr, ent);
1506 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
1507 return new_d_Phi(NULL, arity, in, mode);
1509 ir_node *new_Sync(int arity, ir_node *in[]) {
1510 return new_d_Sync(NULL, arity, in);
1512 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
1513 return new_d_defaultProj(NULL, arg, max_proj);
1515 ir_node *new_Bad(void) {
1516 return get_irg_bad(current_ir_graph);
1518 ir_node *new_EndReg(void) {
1519 return new_d_EndReg(NULL);
1521 ir_node *new_EndExcept(void) {
1522 return new_d_EndExcept(NULL);
1524 ir_node *new_NoMem(void) {
1525 return get_irg_no_mem(current_ir_graph);
1527 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1528 int n_outs, ir_asm_constraint *outputs,
1529 int n_clobber, ident *clobber[], ident *asm_text) {
1530 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1533 /* create a new anchor node */
1534 ir_node *new_Anchor(ir_graph *irg) {
1535 ir_node *in[anchor_last];
1536 memset(in, 0, sizeof(in));
1537 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);