2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
46 /* when we need verifying */
/* NOTE(review): these are the two arms of an #if/#else on a verify flag;
   the conditional lines themselves are missing from this sampled listing.
   Arm 1: verification compiled out (empty macro).
   Arm 2: delegates to irn_vrfy_irg(). */
48 # define IRN_VRFY_IRG(res, irg)
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
/* Consulted by phi_merge() when a local is read in the start block before
   any definition; NULL means fall back to new_Unknown(mode) instead. */
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
58 /* creates a bd constructor for a binop */
/* Expands to new_bd_<instr>(db, block, op1, op2, mode): builds a 2-input
   node of opcode op_<instr> on current_ir_graph, lets the local optimizer
   fold it, and verifies the result.  NOTE(review): the numbered listing is
   sampled; the in[]-array setup and return lines are not visible here. */
59 #define NEW_BD_BINOP(instr) \
61 new_bd_##instr(dbg_info *db, ir_node *block, \
62 ir_node *op1, ir_node *op2, ir_mode *mode) \
66 ir_graph *irg = current_ir_graph; \
69 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
70 res = optimize_node(res); \
71 IRN_VRFY_IRG(res, irg); \
75 /* creates a bd constructor for an unop */
/* Expands to new_bd_<instr>(db, block, op, mode): builds a 1-input node of
   opcode op_<instr> on current_ir_graph (note &op is passed directly as the
   in-array), optimizes and verifies it.  Listing is sampled; the return
   line is not visible here. */
76 #define NEW_BD_UNOP(instr) \
78 new_bd_##instr(dbg_info *db, ir_node *block, \
79 ir_node *op, ir_mode *mode) \
82 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for a divop */
/* Expands to new_bd_<instr>(db, block, memop, op1, op2, mode, state):
   builds a 3-input (memory + two operands) mode_T node, records the
   result mode, pin state and no_remainder flag in the divmod attribute,
   then optimizes and verifies.  Sampled listing: in[] setup/return lines
   are not visible here. */
90 #define NEW_BD_DIVOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
97 ir_graph *irg = current_ir_graph; \
101 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
102 res->attr.divmod.exc.pin_state = state; \
103 res->attr.divmod.resmode = mode; \
104 res->attr.divmod.no_remainder = 0; \
105 res = optimize_node(res); \
106 IRN_VRFY_IRG(res, irg); \
110 /* creates a rd constructor for a binop */
/* rd variant: takes an explicit irg; temporarily swaps current_ir_graph to
   irg, delegates to the bd constructor, then restores the previous graph. */
111 #define NEW_RD_BINOP(instr) \
113 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
114 ir_node *op1, ir_node *op2, ir_mode *mode) \
117 ir_graph *rem = current_ir_graph; \
118 current_ir_graph = irg; \
119 res = new_bd_##instr(db, block, op1, op2, mode); \
120 current_ir_graph = rem; \
124 /* creates a rd constructor for an unop */
/* rd variant for unops: save/switch/restore current_ir_graph around the
   bd constructor call. */
125 #define NEW_RD_UNOP(instr) \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
128 ir_node *op, ir_mode *mode) \
131 ir_graph *rem = current_ir_graph; \
132 current_ir_graph = irg; \
133 res = new_bd_##instr(db, block, op, mode); \
134 current_ir_graph = rem; \
138 /* creates a rd constructor for a divop */
/* rd variant for divops: save/switch/restore current_ir_graph around the
   bd constructor call. */
139 #define NEW_RD_DIVOP(instr) \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
142 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
145 ir_graph *rem = current_ir_graph; \
146 current_ir_graph = irg; \
147 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
148 current_ir_graph = rem; \
152 /* creates a d constructor for a binop */
/* d variant: constructs in the current block of current_ir_graph by
   delegating straight to the bd constructor. */
153 #define NEW_D_BINOP(instr) \
155 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
156 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
159 /* creates a d constructor for an unop */
160 #define NEW_D_UNOP(instr) \
162 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
163 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
166 #include "gen_ir_cons.c.inl"
/* Build a Start node (mode_T, no predecessors) on current_ir_graph and
   verify it.  Sampled listing: the res declaration/return are not visible. */
169 new_bd_Start(dbg_info *db, ir_node *block) {
171 	ir_graph *irg = current_ir_graph;
173 	res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175 	IRN_VRFY_IRG(res, irg);
/* Build an End node: mode_X with dynamic arity (-1), so keep-alive edges
   can be appended later via add_End_keepalive(). */
180 new_bd_End(dbg_info *db, ir_node *block) {
182 	ir_graph *irg = current_ir_graph;
184 	res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186 	IRN_VRFY_IRG(res, irg);
191 * Creates a Phi node with all predecessors. Calling this constructor
192 * is only allowed if the corresponding block is mature.
195 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
197 	ir_graph *irg = current_ir_graph;
201 	/* Don't assert that block matured: the use of this constructor is strongly
	/* If the block is already mature, its arity must match the Phi's. */
203 	if (get_Block_matured(block))
204 		assert(get_irn_arity(block) == arity);
206 	res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
	/* Fresh (all-false) backedge flags for loop construction. */
208 	res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
	/* Scan inputs for Unknown operands; skip optimization if any is found
	   (their value may still be refined later). */
210 	for (i = arity - 1; i >= 0; --i)
211 		if (is_Unknown(in[i])) {
216 	if (!has_unknown) res = optimize_node(res);
217 	IRN_VRFY_IRG(res, irg);
219 	/* Memory Phis in endless loops must be kept alive.
220 	   As we can't distinguish these easily we keep all of them alive. */
221 	if (is_Phi(res) && mode == mode_M)
222 		add_End_keepalive(get_irg_end(irg), res);
/* Build a typed Const node.  Constants always live in the start block; the
   mode is taken from the tarval.  optimize_node() performs CSE, so the
   returned node may be a pre-existing equivalent Const. */
227 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
229 	ir_graph *irg = current_ir_graph;
231 	res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232 	res->attr.con.tv = con;
233 	set_Const_type(res, tp);  /* Call method because of complex assertion. */
234 	res = optimize_node (res);
235 	assert(get_Const_type(res) == tp);
236 	IRN_VRFY_IRG(res, irg);
239 }  /* new_bd_Const_type */
/* Untyped Const: delegates with firm_unknown_type.  NOTE(review): despite
   the bd name this forwards to the rd constructor on current_ir_graph. */
242 new_bd_Const(dbg_info *db, tarval *con) {
243 	ir_graph *irg = current_ir_graph;
245 	return new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Convenience: wrap a C long in a tarval of the given mode, then build
   the Const. */
249 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
250 	ir_graph *irg = current_ir_graph;
252 	return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 }  /* new_bd_Const_long */
/* Build the default Proj of a Cond: marks the Cond as fragmentary, records
   max_proj as its default projection number, and creates the mode_X Proj. */
256 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
260 	assert(arg->op == op_Cond);
261 	arg->attr.cond.kind = fragmentary;
262 	arg->attr.cond.default_proj = max_proj;
263 	res = new_rd_Proj(db, block, arg, mode_X, max_proj);
265 }  /* new_bd_defaultProj */
/* Build a Sel node: selects entity ent relative to objptr with `arity`
   index operands.  Real in-array is [store, objptr, in[0..arity-1]]; the
   result mode is mode_P_code for method entities, mode_P_data otherwise. */
268 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
269            int arity, ir_node **in, ir_entity *ent) {
273 	ir_graph *irg = current_ir_graph;
274 	ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 	assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 	NEW_ARR_A(ir_node *, r_in, r_arity);	/* uses alloca */
	/* Copy the caller's index operands behind store and objptr. */
282 	memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 	 * Sel's can select functions which should be of mode mode_P_code.
286 	res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 	res->attr.sel.entity = ent;
288 	res = optimize_node(res);
289 	IRN_VRFY_IRG(res, irg);
/* Build a SymConst node carrying a symbolic value (entity address, type
   tag/size/align, ...) of the given kind, annotated with type tp. */
294 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
295                      symconst_symbol value,symconst_kind symkind, ir_type *tp) {
296 	ir_graph *irg = current_ir_graph;
297 	ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
299 	res->attr.symc.kind = symkind;
300 	res->attr.symc.sym  = value;
301 	res->attr.symc.tp   = tp;
303 	res = optimize_node(res);
304 	IRN_VRFY_IRG(res, irg);
306 }  /* new_bd_SymConst_type */
/* Build an empty Sync node (mode_M, dynamic arity); predecessors are added
   afterwards via add_Sync_pred() — see new_rd_Sync(). */
309 new_bd_Sync(dbg_info *db, ir_node *block) {
311 	ir_graph *irg = current_ir_graph;
313 	res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
314 	/* no need to call optimize node here, Sync are always created with no predecessors */
315 	IRN_VRFY_IRG(res, irg);
/* Build an EndReg node (regular interprocedural end) and register it on
   the graph. */
321 new_bd_EndReg(dbg_info *db, ir_node *block) {
323 	ir_graph *irg = current_ir_graph;
325 	res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
326 	set_irg_end_reg(irg, res);
327 	IRN_VRFY_IRG(res, irg);
329 }  /* new_bd_EndReg */
/* Build an EndExcept node (exceptional interprocedural end) and register
   it on the graph. */
332 new_bd_EndExcept(dbg_info *db, ir_node *block) {
334 	ir_graph *irg = current_ir_graph;
336 	res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
337 	set_irg_end_except(irg, res);
338 	IRN_VRFY_IRG (res, irg);
340 }  /* new_bd_EndExcept */
/* Build an ASM node (pinned, mode_T): the input/output constraint arrays
   and the clobber list are copied onto the graph's obstack so the caller's
   arrays need not outlive this call. */
343 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
344            int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
346 	ir_graph *irg = current_ir_graph;
348 	res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
349 	res->attr.assem.pin_state = op_pin_state_pinned;
350 	res->attr.assem.inputs    = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
351 	res->attr.assem.outputs   = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
352 	res->attr.assem.clobber   = NEW_ARR_D(ident *, irg->obst, n_clobber);
353 	res->attr.assem.asm_text  = asm_text;
355 	memcpy(res->attr.assem.inputs,  inputs,  sizeof(inputs[0]) * arity);
356 	memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
357 	memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
359 	res = optimize_node(res);
360 	IRN_VRFY_IRG(res, irg);
364 /* --------------------------------------------- */
365 /* private interfaces, for professional use only */
366 /* --------------------------------------------- */
/* rd constructors: explicit-graph wrappers that save current_ir_graph,
   switch to the target graph, call the bd constructor, and restore. */
369 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
370 	ir_graph *rem = current_ir_graph;
373 	current_ir_graph = irg;
374 	res = new_bd_Start(db, block);
375 	current_ir_graph = rem;
381 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
383 	ir_graph *rem = current_ir_graph;
385 	current_ir_graph = irg;
386 	res = new_bd_End(db, block);
387 	current_ir_graph = rem;
392 /* Creates a Phi node with all predecessors. Calling this constructor
393    is only allowed if the corresponding block is mature. */
/* Note: the graph is derived from the block here, not passed explicitly. */
395 new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
397 	ir_graph *rem = current_ir_graph;
399 	current_ir_graph = get_Block_irg(block);
400 	res = new_bd_Phi(db, block,arity, in, mode);
401 	current_ir_graph = rem;
/* Explicit-graph typed Const constructor. */
407 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
409 	ir_graph *rem = current_ir_graph;
411 	current_ir_graph = irg;
412 	res = new_bd_Const_type(db, con, tp);
413 	current_ir_graph = rem;
416 }  /* new_rd_Const_type */
/* Untyped Const on an explicit graph; inlines the _type variant with
   firm_unknown_type (the commented-out line shows the equivalent call). */
419 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
421 //#ifdef USE_ORIGINAL
422 	ir_graph *rem = current_ir_graph;
424 	current_ir_graph = irg;
425 	res = new_bd_Const_type(db, con, firm_unknown_type);
426 	current_ir_graph = rem;
428 //	res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Convenience wrapper: build the tarval from a C long first. */
435 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
436 	return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
437 }  /* new_rd_Const_long */
/* Explicit-graph default-Proj constructor; graph is taken from the block. */
440 new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj) {
442 	ir_graph *rem = current_ir_graph;
444 	current_ir_graph = get_Block_irg(block);
445 	res = new_bd_defaultProj(db, block, arg, max_proj);
446 	current_ir_graph = rem;
449 }  /* new_rd_defaultProj */
/* Sel with no index operands (simple member selection). */
452 new_rd_simpleSel(dbg_info *db, ir_node *block,
453                  ir_node *store, ir_node *objptr, ir_entity *ent) {
455 	ir_graph *rem = current_ir_graph;
457 	current_ir_graph = get_Block_irg(block);
458 	res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
459 	current_ir_graph = rem;
462 }  /* new_rd_simpleSel */
/* Explicit-graph SymConst constructor; SymConsts are always placed in the
   start block of the graph. */
465 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
466                      symconst_symbol value, symconst_kind symkind, ir_type *tp) {
468 	ir_graph *rem = current_ir_graph;
469 	ir_node *block = get_irg_start_block(irg);
471 	current_ir_graph = irg;
472 	res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
473 	current_ir_graph = rem;
476 }  /* new_rd_SymConst_type */
/* Untyped variant. */
479 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
480                 symconst_symbol value, symconst_kind symkind) {
481 	return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
482 }  /* new_rd_SymConst */
/* Kind-specific wrappers: each packs its payload (entity, ident or type)
   into a symconst_symbol and picks the matching symconst_kind. */
484 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
486 	sym.entity_p = symbol;
487 	return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
488 }  /* new_rd_SymConst_addr_ent */
490 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
492 	sym.entity_p = symbol;
493 	return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
494 }  /* new_rd_SymConst_ofs_ent */
496 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
498 	sym.ident_p = symbol;
499 	return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_name, tp);
500 }  /* new_rd_SymConst_addr_name */
502 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
505 	return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
506 }  /* new_rd_SymConst_type_tag */
508 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
511 	return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
512 }  /* new_rd_SymConst_size */
514 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
517 	return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
518 }  /* new_rd_SymConst_align */
/* Build a Sync with `arity` predecessors: create an empty Sync on the
   block's graph, then append each predecessor via add_Sync_pred(). */
521 new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[]) {
523 	ir_graph *rem = current_ir_graph;
526 	current_ir_graph = get_Block_irg(block);
527 	res = new_bd_Sync(db, block);
528 	current_ir_graph = rem;
530 	for (i = 0; i < arity; ++i)
531 		add_Sync_pred(res, in[i]);
/* NOTE(review): unlike most rd constructors these two build the node
   directly instead of delegating to the bd variant. */
537 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
540 	res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
541 	set_irg_end_reg(irg, res);
542 	IRN_VRFY_IRG(res, irg);
544 }  /* new_rd_EndReg */
547 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
550 	res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
551 	set_irg_end_except(irg, res);
552 	IRN_VRFY_IRG (res, irg);
554 }  /* new_rd_EndExcept */
/* Explicit-graph ASM constructor; graph is derived from the block. */
556 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
557                     int arity, ir_node *in[], ir_asm_constraint *inputs,
558                     int n_outs, ir_asm_constraint *outputs,
559                     int n_clobber, ident *clobber[], ident *asm_text) {
561 	ir_graph *rem = current_ir_graph;
563 	current_ir_graph = get_Block_irg(block);
564 	res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
565 	current_ir_graph = rem;
/* new_r_* wrappers: the rd constructors without debug info (db == NULL).
   new_r_Bad and new_r_NoMem return the graph's singleton Bad/NoMem nodes
   instead of allocating. */
570 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
571 	return new_rd_Start(NULL, irg, block);
573 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
574 	return new_rd_End(NULL, irg, block);
576 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
577 	return new_rd_Const(NULL, irg, con);
579 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
580 	return new_rd_Const_long(NULL, irg, mode, value);
582 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
583 	return new_rd_Const_type(NULL, irg, con, tp);
585 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode,
586                         symconst_symbol value, symconst_kind symkind) {
587 	return new_rd_SymConst(NULL, irg, mode, value, symkind);
589 ir_node *new_r_simpleSel(ir_node *block, ir_node *store,
590                          ir_node *objptr, ir_entity *ent) {
591 	return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
593 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode) {
594 	return new_rd_Phi(NULL, block, arity, in, mode);
596 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[]) {
597 	return new_rd_Sync(NULL, block, arity, in);
599 ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj) {
600 	return new_rd_defaultProj(NULL, block, arg, max_proj);
602 ir_node *new_r_Bad(ir_graph *irg) {
603 	return get_irg_bad(irg);
605 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
606 	return new_rd_EndReg(NULL, irg, block);
608 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
609 	return new_rd_EndExcept(NULL, irg, block);
611 ir_node *new_r_NoMem(ir_graph *irg) {
612 	return get_irg_no_mem(irg);
614 ir_node *new_r_ASM(ir_node *block,
615                    int arity, ir_node *in[], ir_asm_constraint *inputs,
616                    int n_outs, ir_asm_constraint *outputs,
617                    int n_clobber, ident *clobber[], ident *asm_text) {
618 	return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
621 /** ********************/
622 /** public interfaces */
623 /** construction tools */
/* d constructors: build in the current block of current_ir_graph. */
626 new_d_Start(dbg_info *db) {
629 	res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
630 	                  op_Start, mode_T, 0, NULL);
632 	res = optimize_node(res);
633 	IRN_VRFY_IRG(res, current_ir_graph);
638 new_d_End(dbg_info *db) {
640 	res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
641 	                  op_End, mode_X, -1, NULL);
642 	res = optimize_node(res);
643 	IRN_VRFY_IRG(res, current_ir_graph);
648 /* ***********************************************************************/
649 /* Methods necessary for automatic Phi node creation */
651 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
652 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
653 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
654 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
656 Call Graph: ( A ---> B == A "calls" B)
658 get_value mature_immBlock
666 get_r_value_internal |
670 new_rd_Phi0 new_rd_Phi_in
672 * *************************************************************************** */
674 /** Creates a Phi node with 0 predecessors. */
/* Used as a placeholder while a block is immature; mature_immBlock() /
   phi_merge() later exchange it for a real Phi.  Deliberately NOT run
   through optimize_node(). */
675 static inline ir_node *
676 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
679 	res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
680 	IRN_VRFY_IRG(res, irg);
686 * Internal constructor of a Phi node by a phi_merge operation.
688 * @param irg    the graph on which the Phi will be constructed
689 * @param block  the block in which the Phi will be constructed
690 * @param mode   the mode of the Phi node
691 * @param in     the input array of the phi node
692 * @param ins    number of elements in the input array
693 * @param phi0   in non-NULL: the Phi0 node in the same block that represents
694 *               the value for which the new Phi is constructed
 * If all non-self inputs collapse to one known value, the Phi is freed (or
 * exchanged, if a Phi0 must be replaced) and that value is returned instead.
696 static inline ir_node *
697 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
698               ir_node **in, int ins, ir_node *phi0) {
700 	ir_node *res, *known;
702 	/* Allocate a new node on the obstack. The allocation copies the in
704 	res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
705 	res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
707 	/* This loop checks whether the Phi has more than one predecessor.
708 	   If so, it is a real Phi node and we break the loop. Else the
709 	   Phi node merges the same definition on several paths and therefore
711 	   Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
713 	for (i = ins - 1; i >= 0; --i) {
716 		in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */
718 		/* Optimize self referencing Phis: We can't detect them yet properly, as
719 		   they still refer to the Phi0 they will replace. So replace right now. */
720 		if (phi0 && in[i] == phi0)
		/* Self-reference or repeat of the single known value: keep scanning. */
723 		if (in[i] == res || in[i] == known)
732 	/* i < 0: there is at most one predecessor, we don't need a phi node. */
		/* The freshly allocated Phi is superfluous: unregister its edges and
		   give the obstack memory back. */
735 			edges_node_deleted(res, current_ir_graph);
736 			obstack_free(current_ir_graph->obst, res);
738 			/* If pred is a phi node we want to optimize it: If loops are matured in a bad
739 			   order, an enclosing Phi node may become superfluous. */
740 			res = optimize_in_place_2(known);
742 				exchange(known, res);
747 			/* A undefined value, e.g., in unreachable code. */
751 	res = optimize_node(res);  /* This is necessary to add the node to the hash table for cse. */
752 	IRN_VRFY_IRG(res, irg);
753 	/* Memory Phis in endless loops must be kept alive.
754 	   As we can't distinguish these easily we keep all of them alive. */
755 	if (is_Phi(res) && mode == mode_M)
756 		add_End_keepalive(get_irg_end(irg), res);
760 }  /* new_rd_Phi_in */
/* Forward declarations: get_r_value_internal and phi_merge are mutually
   recursive (see the call-graph sketch above). */
763 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
766 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
769 * Construct a new frag_array for node n.
770 * Copy the content from the current graph_arr of the corresponding block:
771 * this is the current state.
772 * Set ProjM(n) as current memory state.
773 * Further the last entry in frag_arr of current block points to n. This
774 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
776 static inline ir_node **new_frag_arr(ir_node *n) {
780 	arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
781 	memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
782 	       sizeof(ir_node *)*current_ir_graph->n_loc);
784 	/* turn off optimization before allocating Proj nodes, as res isn't
786 	opt = get_opt_optimize(); set_optimize(0);
787 	/* Here we rely on the fact that all frag ops have Memory as first result! */
789 		arr[0] = new_Proj(n, mode_M, pn_Call_M);
790 	} else if (is_CopyB(n)) {
791 		arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
		/* All remaining fragile ops share the same memory Proj number, so any
		   of the asserted constants works here. */
793 		assert((pn_Quot_M == pn_DivMod_M) &&
794 		       (pn_Quot_M == pn_Div_M)    &&
795 		       (pn_Quot_M == pn_Mod_M)    &&
796 		       (pn_Quot_M == pn_Load_M)   &&
797 		       (pn_Quot_M == pn_Store_M)  &&
798 		       (pn_Quot_M == pn_Alloc_M)  &&
799 		       (pn_Quot_M == pn_Bound_M));
800 		arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
	/* Chain: the block's last slot always points at the newest frag op. */
804 	current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
809 * Returns the frag_arr from a node.
/* Dispatches on opcode because the frag_arr lives in different attribute
   unions (call/alloc/load/store/generic except).  NOTE(review): the case
   labels are missing from this sampled listing. */
811 static inline ir_node **get_frag_arr(ir_node *n) {
812 	switch (get_irn_opcode(n)) {
814 		return n->attr.call.exc.frag_arr;
816 		return n->attr.alloc.exc.frag_arr;
818 		return n->attr.load.exc.frag_arr;
820 		return n->attr.store.exc.frag_arr;
822 		return n->attr.except.frag_arr;
/* Write val into slot pos of frag_arr and of every frag array further down
   the frag-op chain, stopping at slots already set.  The counting loop
   (bounded at 1024 iterations) replaces unbounded recursion and catches
   cyclic chains. */
827 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
831 	for (i = 1024; i >= 0; --i) {
		/* Only fill empty slots: later definitions must not be overwritten. */
836 		if (frag_arr[pos] == NULL)
		/* Follow the chain to the next fragile op's frag array, if any. */
838 		if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
839 			ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
840 			assert(arr != frag_arr && "Endless recursion detected");
845 	assert(!"potential endless recursion in set_frag_value");
846 }  /* set_frag_value */
/* Look up the value at pos as seen along the exceptional edge of fragile
   op cfOp: consult cfOp's frag array; if the slot is set, a set_value()
   happened after the cfOp, so a Phi (or Phi0 for immature blocks) must be
   built; otherwise fall back to the normal block lookup. */
849 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
853 	assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
855 	frag_arr = get_frag_arr(cfOp);
858 	if (block->attr.block.graph_arr[pos] != NULL) {
859 		/* There was a set_value() after the cfOp and no get_value() before that
860 		   set_value(). We must build a Phi node now. */
861 		if (block->attr.block.is_matured) {
862 			int ins = get_irn_arity(block);
864 			NEW_ARR_A(ir_node *, nin, ins);
865 			res = phi_merge(block, pos, mode, nin, ins);
			/* Immature block: queue a Phi0 on the block's phi list, to be
			   completed by mature_immBlock(). */
867 			res = new_rd_Phi0(current_ir_graph, block, mode);
868 			res->attr.phi.u.pos = pos;
869 			res->attr.phi.next = block->attr.block.phis;
870 			block->attr.block.phis = res;
873 		/* It's a Phi, we can write this into all graph_arrs with NULL */
874 		set_frag_value(block->attr.block.graph_arr, pos, res);
876 		res = get_r_value_internal(block, pos, mode);
877 		set_frag_value(block->attr.block.graph_arr, pos, res);
881 }  /* get_r_frag_value_internal */
884 * Check whether a control flownode cf_pred represents an exception flow.
886 * @param cf_pred     the control flow node
887 * @param prev_cf_op  if cf_pred is a Proj, the predecessor node, else equal to cf_pred
 * @return non-zero iff cf_pred is the exceptional Proj of a fragile op.
889 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
891 	 * Note: all projections from a raise are "exceptional control flow" so we handle it
892 	 * like a normal Jmp, because there is no "regular" one.
893 	 * That's why Raise is no "fragile_op"!
895 	if (is_fragile_op(prev_cf_op)) {
896 		if (is_Proj(cf_pred)) {
897 			if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
898 				/* the regular control flow, NO exception */
			/* Fragile ops have exactly two X projs; not-regular means except. */
901 			assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
904 			/* Hmm, exception but not a Proj? */
905 			assert(!"unexpected condition: fragile op without a proj");
909 }  /* is_exception_flow */
912 * Computes the predecessors for the real phi node, and then
913 * allocates and returns this node.  The routine called to allocate the
914 * node might optimize it away and return a real value.
915 * This function must be called with an in-array of proper size.
 * nin is caller-provided scratch space of at least `ins` slots; slot i-1
 * receives the value reaching the block via control predecessor i.
918 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
919 	ir_node *prevBlock, *res, *phi0, *phi0_all;
922 	/* If this block has no value at pos create a Phi0 and remember it
923 	   in graph_arr to break recursions.
924 	   Else we may not set graph_arr as there a later value is remembered. */
926 	if (block->attr.block.graph_arr[pos] == NULL) {
927 		ir_graph *irg = current_ir_graph;
929 		if (block == get_irg_start_block(irg)) {
930 			/* Collapsing to Bad tarvals is no good idea.
931 			   So we call a user-supplied routine here that deals with this case as
932 			   appropriate for the given language. Sorrily the only help we can give
933 			   here is the position.
935 			   Even if all variables are defined before use, it can happen that
936 			   we get to the start block, if a Cond has been replaced by a tuple
937 			   (bad, jmp). In this case we call the function needlessly, eventually
938 			   generating a non-existent error.
939 			   However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
942 			if (default_initialize_local_variable != NULL) {
943 				ir_node *rem = get_cur_block();
				/* The callback constructs in `block`; restore afterwards. */
945 				set_cur_block(block);
946 				block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
950 				block->attr.block.graph_arr[pos] = new_Unknown(mode);
951 			/* We don't need to care about exception ops in the start block.
952 			   There are none by definition. */
953 			return block->attr.block.graph_arr[pos];
			/* Not the start block: plant a Phi0 placeholder to break cycles. */
955 			phi0 = new_rd_Phi0(irg, block, mode);
956 			block->attr.block.graph_arr[pos] = phi0;
957 			if (get_opt_precise_exc_context()) {
958 				/* Set graph_arr for fragile ops.  Also here we should break recursion.
959 				   We could choose a cyclic path through an cfop. But the recursion would
960 				   break at some point. */
961 				set_frag_value(block->attr.block.graph_arr, pos, phi0);
966 	/* This loop goes to all predecessor blocks of the block the Phi node
967 	   is in and there finds the operands of the Phi node by calling
968 	   get_r_value_internal. */
969 	for (i = 1; i <= ins; ++i) {
970 		ir_node *cf_pred = block->in[i];
971 		ir_node *prevCfOp = skip_Proj(cf_pred);
973 		if (is_Bad(prevCfOp)) {
974 			/* In case a Cond has been optimized we would get right to the start block
975 			   with an invalid definition. */
976 			nin[i-1] = new_Bad();
979 		prevBlock = prevCfOp->in[0];  /* go past control flow op to prev block */
981 		if (!is_Bad(prevBlock)) {
			/* Along exceptional edges use the fragile op's frag array. */
982 			if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
983 				assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
984 				nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
986 				nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
988 			nin[i-1] = new_Bad();
992 	/* We want to pass the Phi0 node to the constructor: this finds additional
993 	   optimization possibilities.
994 	   The Phi0 node either is allocated in this function, or it comes from
995 	   a former call to get_r_value_internal(). In this case we may not yet
996 	   exchange phi0, as this is done in mature_immBlock(). */
998 	phi0_all = block->attr.block.graph_arr[pos];
	/* Only pass a genuine zero-arity Phi0 of this block to the constructor. */
999 	if (! is_Phi0(phi0_all) ||
1000 	    get_irn_arity(phi0_all) != 0 ||
1001 	    get_nodes_block(phi0_all) != block)
1007 	/* After collecting all predecessors into the array nin a new Phi node
1008 	   with these predecessors is created.  This constructor contains an
1009 	   optimization: If all predecessors of the Phi node are identical it
1010 	   returns the only operand instead of a new Phi node. */
1011 	res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1013 	/* In case we allocated a Phi0 node at the beginning of this procedure,
1014 	   we need to exchange this Phi0 with the real Phi. */
1016 		exchange(phi0, res);
1017 		block->attr.block.graph_arr[pos] = res;
1018 		/* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
1019 		   only an optimization. */
1026 * This function returns the last definition of a value.  In case
1027 * this value was last defined in a previous block, Phi nodes are
1028 * inserted.  If the part of the firm graph containing the definition
1029 * is not yet constructed, a dummy Phi node is returned.
1031 * @param block   the current block
1032 * @param pos     the value number of the value searched
1033 * @param mode    the mode of this value (needed for Phi construction)
1036 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
1038 	/* There are 4 cases to treat.
1040 	   1. The block is not mature and we visit it the first time.  We can not
1041 	      create a proper Phi node, therefore a Phi0, i.e., a Phi without
1042 	      predecessors is returned.  This node is added to the linked list (block
1043 	      attribute "phis") of the containing block to be completed when this block is
1044 	      matured. (Completion will add a new Phi and turn the Phi0 into an Id
1047 	   2. The value is already known in this block, graph_arr[pos] is set and we
1048 	      visit the block the first time.  We can return the value without
1049 	      creating any new nodes.
1051 	   3. The block is mature and we visit it the first time.  A Phi node needs
1052 	      to be created (phi_merge).  If the Phi is not needed, as all it's
1053 	      operands are the same value reaching the block through different
1054 	      paths, it's optimized away and the value itself is returned.
1056 	   4. The block is mature, and we visit it the second time.  Now two
1057 	      subcases are possible:
1058 	      * The value was computed completely the last time we were here.  This
1059 	        is the case if there is no loop.  We can return the proper value.
1060 	      * The recursion that visited this node and set the flag did not
1061 	        return yet.  We are computing a value in a loop and need to
1062 	        break the recursion.  This case only happens if we visited
1063 	        the same block with phi_merge before, which inserted a Phi0.
1064 	        So we return the Phi0.
1067 	/* case 4 -- already visited. */
1068 	if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1069 		/* As phi_merge allocates a Phi0 this value is always defined.  Here
1070 		   is the critical difference of the two algorithms. */
1071 		assert(block->attr.block.graph_arr[pos]);
1072 		return block->attr.block.graph_arr[pos];
1075 	/* visited the first time */
1076 	set_irn_visited(block, get_irg_visited(current_ir_graph));
1078 	/* Get the local valid value */
1079 	res = block->attr.block.graph_arr[pos];
1081 	/* case 2 -- If the value is actually computed, return it. */
1085 	if (block->attr.block.is_matured) { /* case 3 */
1087 		/* The Phi has the same amount of ins as the corresponding block. */
1088 		int ins = get_irn_arity(block);
1090 		NEW_ARR_A(ir_node *, nin, ins);
1092 		/* Phi merge collects the predecessors and then creates a node. */
1093 		res = phi_merge(block, pos, mode, nin, ins);
1095 	} else {  /* case 1 */
1096 		/* The block is not mature, we don't know how many in's are needed.  A Phi
1097 		   with zero predecessors is created.  Such a Phi node is called Phi0
1098 		   node.  The Phi0 is then added to the list of Phi0 nodes in this block
1099 		   to be matured by mature_immBlock later.
1100 		   The Phi0 has to remember the pos of it's internal value.  If the real
1101 		   Phi is computed, pos is used to update the array with the local
1103 		res = new_rd_Phi0(current_ir_graph, block, mode);
1104 		res->attr.phi.u.pos = pos;
1105 		res->attr.phi.next = block->attr.block.phis;
1106 		block->attr.block.phis = res;
1109 	assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1111 	/* The local valid value is available now. */
1112 	block->attr.block.graph_arr[pos] = res;
1115 }  /* get_r_value_internal */
1117 /* ************************************************************************** */
1120 * Finalize a Block node, when all control flows are known.
1121 * Acceptable parameters are only Block nodes.
 * Completes all queued Phi0 placeholders of the block via phi_merge() and
 * marks the block mature; idempotent for already-matured blocks.
1124 mature_immBlock(ir_node *block) {
1129 	assert(is_Block(block));
1130 	if (!get_Block_matured(block)) {
1131 		ir_graph *irg = current_ir_graph;
1133 		ins = ARR_LEN(block->in) - 1;
1134 		/* Fix block parameters */
1135 		block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1137 		/* An array for building the Phi nodes. */
1138 		NEW_ARR_A(ir_node *, nin, ins);
1140 		/* Traverse a chain of Phi nodes attached to this block and mature
		/* inc_irg_visited per Phi: each phi_merge needs a fresh visited epoch. */
1142 		for (n = block->attr.block.phis; n; n = next) {
1143 			inc_irg_visited(irg);
1144 			next = n->attr.phi.next;
1145 			exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1148 		block->attr.block.is_matured = 1;
1150 		/* Now, as the block is a finished Firm node, we can optimize it.
1151 		   Since other nodes have been allocated since the block was created
1152 		   we can not free the node on the obstack.  Therefore we have to call
1153 		   optimize_in_place().
1154 		   Unfortunately the optimization does not change a lot, as all allocated
1155 		   nodes refer to the unoptimized node.
1156 		   We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1157 		block = optimize_in_place_2(block);
1158 		IRN_VRFY_IRG(block, irg);
1160 }  /* mature_immBlock */
1163 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
1164 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1168 new_d_Const(dbg_info *db, tarval *con) {
1169 return new_bd_Const(db, con);
1173 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
1174 return new_bd_Const_long(db, mode, value);
1175 } /* new_d_Const_long */
1178 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
1179 return new_bd_Const_type(db, con, tp);
1180 } /* new_d_Const_type */
1184 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
1186 assert(arg->op == op_Cond);
1187 arg->attr.cond.kind = fragmentary;
1188 arg->attr.cond.default_proj = max_proj;
1189 res = new_d_Proj(db, arg, mode_X, max_proj);
1191 } /* new_d_defaultProj */
1194 * Allocate a frag array for a node if the current graph state is phase_building.
1196 * @param irn the node for which the frag array should be allocated
1197 * @param op the opcode of the (original) node, if does not match opcode of irn,
1199 * @param frag_store the address of the frag store in irn attributes, if this
1200 * address contains a value != NULL, does nothing
1202 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
1203 if (get_opt_precise_exc_context()) {
1204 if ((current_ir_graph->phase_state == phase_building) &&
1205 (get_irn_op(irn) == op) && /* Could be optimized away. */
1206 !*frag_store) /* Could be a cse where the arr is already set. */ {
1207 *frag_store = new_frag_arr(irn);
1210 } /* firm_alloc_frag_arr */
1213 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1214 /* GL: objptr was called frame before. Frame was a bad choice for the name
1215 as the operand could as well be a pointer to a dynamic object. */
1217 return new_bd_Sel(db, current_ir_graph->current_block,
1218 store, objptr, 0, NULL, ent);
1219 } /* new_d_simpleSel */
1222 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
1223 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1225 } /* new_d_SymConst_type */
1228 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
1229 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1230 value, kind, firm_unknown_type);
1231 } /* new_d_SymConst */
1234 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
1235 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1239 new_d_EndReg(dbg_info *db) {
1240 return new_bd_EndReg(db, current_ir_graph->current_block);
1241 } /* new_d_EndReg */
1244 new_d_EndExcept(dbg_info *db) {
1245 return new_bd_EndExcept(db, current_ir_graph->current_block);
1246 } /* new_d_EndExcept */
1250 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1251 int n_outs, ir_asm_constraint *outputs,
1252 int n_clobber, ident *clobber[], ident *asm_text) {
1253 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1256 /* ********************************************************************* */
1257 /* Comfortable interface with automatic Phi node construction. */
1258 /* (Uses also constructors of ?? interface, except new_Block. */
1259 /* ********************************************************************* */
1261 /* Block construction */
1262 /* immature Block without predecessors */
1264 new_d_immBlock(dbg_info *db) {
1267 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1268 /* creates a new dynamic in-array as length of in is -1 */
1269 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1271 /* macroblock head */
1274 res->attr.block.is_matured = 0;
1275 res->attr.block.is_dead = 0;
1276 res->attr.block.is_mb_head = 1;
1277 res->attr.block.irg.irg = current_ir_graph;
1278 res->attr.block.backedge = NULL;
1279 res->attr.block.in_cg = NULL;
1280 res->attr.block.cg_backedge = NULL;
1281 res->attr.block.extblk = NULL;
1282 res->attr.block.region = NULL;
1283 res->attr.block.mb_depth = 0;
1284 res->attr.block.entity = NULL;
1286 set_Block_block_visited(res, 0);
1288 /* Create and initialize array for Phi-node construction. */
1289 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1290 current_ir_graph->n_loc);
1291 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1293 /* Immature block may not be optimized! */
1294 IRN_VRFY_IRG(res, current_ir_graph);
1297 } /* new_d_immBlock */
1300 new_immBlock(void) {
1301 return new_d_immBlock(NULL);
1302 } /* new_immBlock */
1304 /* immature PartBlock with its predecessors */
1306 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
1307 ir_node *res = new_d_immBlock(db);
1308 ir_node *blk = get_nodes_block(pred_jmp);
1310 res->in[0] = blk->in[0];
1311 assert(res->in[0] != NULL);
1312 add_immBlock_pred(res, pred_jmp);
1314 res->attr.block.is_mb_head = 0;
1315 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1318 } /* new_d_immPartBlock */
1321 new_immPartBlock(ir_node *pred_jmp) {
1322 return new_d_immPartBlock(NULL, pred_jmp);
1323 } /* new_immPartBlock */
1325 /* add an edge to a jmp/control flow node */
1327 add_immBlock_pred(ir_node *block, ir_node *jmp) {
1328 int n = ARR_LEN(block->in) - 1;
1330 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1331 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1332 assert(is_ir_node(jmp));
1334 ARR_APP1(ir_node *, block->in, jmp);
1336 hook_set_irn_n(block, n, jmp, NULL);
1337 } /* add_immBlock_pred */
1339 /* changing the current block */
1341 set_cur_block(ir_node *target) {
1342 current_ir_graph->current_block = target;
1343 } /* set_cur_block */
1345 /* ************************ */
1346 /* parameter administration */
1348 /* get a value from the parameter array from the current block by its index */
1350 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
1351 ir_graph *irg = current_ir_graph;
1352 assert(get_irg_phase_state(irg) == phase_building);
1353 inc_irg_visited(irg);
1358 return get_r_value_internal(irg->current_block, pos + 1, mode);
1361 /* get a value from the parameter array from the current block by its index */
1363 get_value(int pos, ir_mode *mode) {
1364 return get_d_value(NULL, pos, mode);
1367 /* set a value at position pos in the parameter array from the current block */
1369 set_value(int pos, ir_node *value) {
1370 ir_graph *irg = current_ir_graph;
1371 assert(get_irg_phase_state(irg) == phase_building);
1373 assert(pos+1 < irg->n_loc);
1374 assert(is_ir_node(value));
1375 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1378 /* Find the value number for a node in the current block.*/
1380 find_value(ir_node *value) {
1382 ir_node *bl = current_ir_graph->current_block;
1384 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1385 if (bl->attr.block.graph_arr[i] == value)
1390 /* get the current store */
1393 ir_graph *irg = current_ir_graph;
1395 assert(get_irg_phase_state(irg) == phase_building);
1396 /* GL: one could call get_value instead */
1397 inc_irg_visited(irg);
1398 return get_r_value_internal(irg->current_block, 0, mode_M);
1401 /* set the current store: handles automatic Sync construction for Load nodes */
1403 set_store(ir_node *store) {
1404 ir_node *load, *pload, *pred, *in[2];
1406 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1407 /* Beware: due to dead code elimination, a store might become a Bad node even in
1408 the construction phase. */
1409 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1411 if (get_opt_auto_create_sync()) {
1412 /* handle non-volatile Load nodes by automatically creating Sync's */
1413 load = skip_Proj(store);
1414 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1415 pred = get_Load_mem(load);
1417 if (is_Sync(pred)) {
1418 /* a Load after a Sync: move it up */
1419 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1421 set_Load_mem(load, get_memop_mem(mem));
1422 add_Sync_pred(pred, store);
1425 pload = skip_Proj(pred);
1426 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1427 /* a Load after a Load: create a new Sync */
1428 set_Load_mem(load, get_Load_mem(pload));
1432 store = new_Sync(2, in);
1437 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1441 keep_alive(ir_node *ka) {
1442 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1445 /* --- Useful access routines --- */
1446 /* Returns the current block of the current graph. To set the current
1447 block use set_cur_block. */
1448 ir_node *get_cur_block(void) {
1449 return get_irg_current_block(current_ir_graph);
1450 } /* get_cur_block */
1452 /* Returns the frame type of the current graph */
1453 ir_type *get_cur_frame_type(void) {
1454 return get_irg_frame_type(current_ir_graph);
1455 } /* get_cur_frame_type */
1458 /* ********************************************************************* */
1461 /* call once for each run of the library */
1463 firm_init_cons(uninitialized_local_variable_func_t *func) {
1464 default_initialize_local_variable = func;
1465 } /* firm_init_cons */
1468 irp_finalize_cons(void) {
1470 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1471 irg_finalize_cons(get_irp_irg(i));
1473 irp->phase_state = phase_high;
1474 } /* irp_finalize_cons */
1476 ir_node *new_Start(void) {
1477 return new_d_Start(NULL);
1479 ir_node *new_End(void) {
1480 return new_d_End(NULL);
1482 ir_node *new_Const(tarval *con) {
1483 return new_d_Const(NULL, con);
1486 ir_node *new_Const_long(ir_mode *mode, long value) {
1487 return new_d_Const_long(NULL, mode, value);
1490 ir_node *new_Const_type(tarval *con, ir_type *tp) {
1491 return new_d_Const_type(NULL, con, tp);
1494 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
1495 return new_d_SymConst_type(NULL, mode, value, kind, type);
1497 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
1498 return new_d_SymConst(NULL, mode, value, kind);
1500 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
1501 return new_d_simpleSel(NULL, store, objptr, ent);
1503 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
1504 return new_d_Phi(NULL, arity, in, mode);
1506 ir_node *new_Sync(int arity, ir_node *in[]) {
1507 return new_d_Sync(NULL, arity, in);
1509 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
1510 return new_d_defaultProj(NULL, arg, max_proj);
1512 ir_node *new_Bad(void) {
1513 return get_irg_bad(current_ir_graph);
1515 ir_node *new_EndReg(void) {
1516 return new_d_EndReg(NULL);
1518 ir_node *new_EndExcept(void) {
1519 return new_d_EndExcept(NULL);
1521 ir_node *new_NoMem(void) {
1522 return get_irg_no_mem(current_ir_graph);
1524 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1525 int n_outs, ir_asm_constraint *outputs,
1526 int n_clobber, ident *clobber[], ident *asm_text) {
1527 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1530 /* create a new anchor node */
1531 ir_node *new_Anchor(ir_graph *irg) {
1532 ir_node *in[anchor_last];
1533 memset(in, 0, sizeof(in));
1534 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);