2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
46 /* when we need verifying */
48 # define IRN_VRFY_IRG(res, irg)
50 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
54 * Language dependent variable initialization callback.
56 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* creates a bd constructor for a binop */
#define NEW_BD_BINOP(instr)                                     \
static ir_node *                                                \
new_bd_##instr(dbg_info *db, ir_node *block,                    \
       ir_node *op1, ir_node *op2, ir_mode *mode)               \
{                                                               \
	ir_node  *in[2];                                            \
	ir_node  *res;                                              \
	ir_graph *irg = current_ir_graph;                           \
	in[0] = op1;                                                \
	in[1] = op2;                                                \
	res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
	res = optimize_node(res);                                   \
	IRN_VRFY_IRG(res, irg);                                     \
	return res;                                                 \
}

/* creates a bd constructor for an unop */
#define NEW_BD_UNOP(instr)                                      \
static ir_node *                                                \
new_bd_##instr(dbg_info *db, ir_node *block,                    \
              ir_node *op, ir_mode *mode)                       \
{                                                               \
	ir_node  *res;                                              \
	ir_graph *irg = current_ir_graph;                           \
	res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
	res = optimize_node(res);                                   \
	IRN_VRFY_IRG(res, irg);                                     \
	return res;                                                 \
}

/* creates a bd constructor for a divop: result is a mode_T tuple,
   the requested result mode is stored in the divmod attribute */
#define NEW_BD_DIVOP(instr)                                     \
static ir_node *                                                \
new_bd_##instr(dbg_info *db, ir_node *block,                    \
            ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                               \
	ir_node  *in[3];                                            \
	ir_node  *res;                                              \
	ir_graph *irg = current_ir_graph;                           \
	in[0] = memop;                                              \
	in[1] = op1;                                                \
	in[2] = op2;                                                \
	res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
	res->attr.divmod.exc.pin_state = state;                     \
	res->attr.divmod.resmode = mode;                            \
	res->attr.divmod.no_remainder = 0;                          \
	res = optimize_node(res);                                   \
	IRN_VRFY_IRG(res, irg);                                     \
	return res;                                                 \
}
/* creates a rd constructor for a binop: switches current_ir_graph to irg,
   delegates to the bd constructor, then restores the old graph */
#define NEW_RD_BINOP(instr)                                     \
ir_node *                                                       \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
       ir_node *op1, ir_node *op2, ir_mode *mode)               \
{                                                               \
	ir_node  *res;                                              \
	ir_graph *rem = current_ir_graph;                           \
	current_ir_graph = irg;                                     \
	res = new_bd_##instr(db, block, op1, op2, mode);            \
	current_ir_graph = rem;                                     \
	return res;                                                 \
}

/* creates a rd constructor for an unop */
#define NEW_RD_UNOP(instr)                                      \
ir_node *                                                       \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
              ir_node *op, ir_mode *mode)                       \
{                                                               \
	ir_node  *res;                                              \
	ir_graph *rem = current_ir_graph;                           \
	current_ir_graph = irg;                                     \
	res = new_bd_##instr(db, block, op, mode);                  \
	current_ir_graph = rem;                                     \
	return res;                                                 \
}

/* creates a rd constructor for a divop */
#define NEW_RD_DIVOP(instr)                                     \
ir_node *                                                       \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
            ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                               \
	ir_node  *res;                                              \
	ir_graph *rem = current_ir_graph;                           \
	current_ir_graph = irg;                                     \
	res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
	current_ir_graph = rem;                                     \
	return res;                                                 \
}

/* creates a d constructor for a binop: uses the current block of
   current_ir_graph */
#define NEW_D_BINOP(instr)                                                    \
ir_node *                                                                     \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {      \
	return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
}

/* creates a d constructor for an unop */
#define NEW_D_UNOP(instr)                                                     \
ir_node *                                                                     \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {                     \
	return new_bd_##instr(db, current_ir_graph->current_block, op, mode);     \
}
166 #include "gen_ir_cons.c.inl"
168 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
171 ir_graph *irg = current_ir_graph;
173 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
175 IRN_VRFY_IRG(res, irg);
179 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
186 IRN_VRFY_IRG(res, irg);
191 * Creates a Phi node with all predecessors. Calling this constructor
192 * is only allowed if the corresponding block is mature.
194 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
197 ir_graph *irg = current_ir_graph;
201 /* Don't assert that block matured: the use of this constructor is strongly
203 if (get_Block_matured(block))
204 assert(get_irn_arity(block) == arity);
206 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
208 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
210 for (i = arity - 1; i >= 0; --i)
211 if (is_Unknown(in[i])) {
216 if (!has_unknown) res = optimize_node(res);
217 IRN_VRFY_IRG(res, irg);
219 /* Memory Phis in endless loops must be kept alive.
220 As we can't distinguish these easily we keep all of them alive. */
221 if (is_Phi(res) && mode == mode_M)
222 add_End_keepalive(get_irg_end(irg), res);
226 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
229 ir_graph *irg = current_ir_graph;
231 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
232 res->attr.con.tv = con;
233 set_Const_type(res, tp); /* Call method because of complex assertion. */
234 res = optimize_node (res);
235 assert(get_Const_type(res) == tp);
236 IRN_VRFY_IRG(res, irg);
239 } /* new_bd_Const_type */
241 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
243 ir_graph *irg = current_ir_graph;
245 return new_rd_Const_type(db, irg, con, firm_unknown_type);
248 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
250 ir_graph *irg = current_ir_graph;
252 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
253 } /* new_bd_Const_long */
255 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
259 assert(arg->op == op_Cond);
260 arg->attr.cond.default_proj = max_proj;
261 res = new_rd_Proj(db, arg, mode_X, max_proj);
263 } /* new_bd_defaultProj */
265 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
266 ir_node *objptr, int arity, ir_node **in,
272 ir_graph *irg = current_ir_graph;
273 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
275 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
278 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
281 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
283 * Sel's can select functions which should be of mode mode_P_code.
285 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
286 res->attr.sel.entity = ent;
287 res = optimize_node(res);
288 IRN_VRFY_IRG(res, irg);
292 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
293 ir_mode *mode, symconst_symbol value,
294 symconst_kind symkind, ir_type *tp)
296 ir_graph *irg = current_ir_graph;
297 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
299 res->attr.symc.kind = symkind;
300 res->attr.symc.sym = value;
301 res->attr.symc.tp = tp;
303 res = optimize_node(res);
304 IRN_VRFY_IRG(res, irg);
306 } /* new_bd_SymConst_type */
308 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
314 /* no need to call optimize node here, Sync are always created with no predecessors */
315 IRN_VRFY_IRG(res, irg);
320 static ir_node *new_bd_EndReg(dbg_info *db, ir_node *block)
323 ir_graph *irg = current_ir_graph;
325 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
326 set_irg_end_reg(irg, res);
327 IRN_VRFY_IRG(res, irg);
329 } /* new_bd_EndReg */
331 static ir_node *new_bd_EndExcept(dbg_info *db, ir_node *block)
334 ir_graph *irg = current_ir_graph;
336 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
337 set_irg_end_except(irg, res);
338 IRN_VRFY_IRG (res, irg);
340 } /* new_bd_EndExcept */
342 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
343 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
344 ir_asm_constraint *outputs, int n_clobber,
345 ident *clobber[], ident *asm_text)
348 ir_graph *irg = current_ir_graph;
350 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
351 res->attr.assem.pin_state = op_pin_state_pinned;
352 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
353 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
354 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
355 res->attr.assem.asm_text = asm_text;
357 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
358 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
359 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
366 /* --------------------------------------------- */
367 /* private interfaces, for professional use only */
368 /* --------------------------------------------- */
370 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
372 ir_graph *rem = current_ir_graph;
375 current_ir_graph = irg;
376 res = new_bd_Start(db, block);
377 current_ir_graph = rem;
382 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
385 ir_graph *rem = current_ir_graph;
387 current_ir_graph = irg;
388 res = new_bd_End(db, block);
389 current_ir_graph = rem;
394 /* Creates a Phi node with all predecessors. Calling this constructor
395 is only allowed if the corresponding block is mature. */
396 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
399 ir_graph *rem = current_ir_graph;
401 current_ir_graph = get_Block_irg(block);
402 res = new_bd_Phi(db, block,arity, in, mode);
403 current_ir_graph = rem;
408 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
411 ir_graph *rem = current_ir_graph;
413 current_ir_graph = irg;
414 res = new_bd_Const_type(db, con, tp);
415 current_ir_graph = rem;
418 } /* new_rd_Const_type */
420 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
423 //#ifdef USE_ORIGINAL
424 ir_graph *rem = current_ir_graph;
426 current_ir_graph = irg;
427 res = new_bd_Const_type(db, con, firm_unknown_type);
428 current_ir_graph = rem;
430 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
436 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
438 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
439 } /* new_rd_Const_long */
441 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
443 return new_bd_defaultProj(db, arg, max_proj);
444 } /* new_rd_defaultProj */
446 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
447 ir_node *objptr, ir_entity *ent)
450 ir_graph *rem = current_ir_graph;
452 current_ir_graph = get_Block_irg(block);
453 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
454 current_ir_graph = rem;
457 } /* new_rd_simpleSel */
459 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
460 symconst_symbol value, symconst_kind symkind,
464 ir_graph *rem = current_ir_graph;
465 ir_node *block = get_irg_start_block(irg);
467 current_ir_graph = irg;
468 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
469 current_ir_graph = rem;
472 } /* new_rd_SymConst_type */
474 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
475 symconst_symbol value, symconst_kind symkind)
477 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
478 } /* new_rd_SymConst */
480 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
483 sym.entity_p = symbol;
484 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
485 } /* new_rd_SymConst_addr_ent */
487 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
490 sym.entity_p = symbol;
491 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
492 } /* new_rd_SymConst_ofs_ent */
494 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
498 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
499 } /* new_rd_SymConst_type_tag */
501 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
505 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
506 } /* new_rd_SymConst_size */
508 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
512 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
513 } /* new_rd_SymConst_align */
515 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
518 ir_graph *rem = current_ir_graph;
521 current_ir_graph = get_Block_irg(block);
522 res = new_bd_Sync(db, block);
523 current_ir_graph = rem;
525 for (i = 0; i < arity; ++i)
526 add_Sync_pred(res, in[i]);
531 ir_node *new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
535 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
536 set_irg_end_reg(irg, res);
537 IRN_VRFY_IRG(res, irg);
539 } /* new_rd_EndReg */
541 ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
545 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
546 set_irg_end_except(irg, res);
547 IRN_VRFY_IRG (res, irg);
549 } /* new_rd_EndExcept */
551 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
552 int arity, ir_node *in[], ir_asm_constraint *inputs,
553 int n_outs, ir_asm_constraint *outputs,
554 int n_clobber, ident *clobber[], ident *asm_text)
557 ir_graph *rem = current_ir_graph;
559 current_ir_graph = get_Block_irg(block);
560 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
561 current_ir_graph = rem;
566 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
568 return new_rd_Start(NULL, irg, block);
570 ir_node *new_r_End(ir_graph *irg, ir_node *block)
572 return new_rd_End(NULL, irg, block);
574 ir_node *new_r_Const(ir_graph *irg, tarval *con)
576 return new_rd_Const(NULL, irg, con);
578 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
580 return new_rd_Const_long(NULL, irg, mode, value);
582 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
584 return new_rd_Const_type(NULL, irg, con, tp);
586 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
587 symconst_kind symkind)
589 return new_rd_SymConst(NULL, irg, mode, value, symkind);
591 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
594 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
596 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
598 return new_rd_Phi(NULL, block, arity, in, mode);
600 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
602 return new_rd_Sync(NULL, block, arity, in);
604 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
606 return new_rd_defaultProj(NULL, arg, max_proj);
608 ir_node *new_r_Bad(ir_graph *irg)
610 return get_irg_bad(irg);
612 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
614 return new_rd_EndReg(NULL, irg, block);
616 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
618 return new_rd_EndExcept(NULL, irg, block);
620 ir_node *new_r_NoMem(ir_graph *irg)
622 return get_irg_no_mem(irg);
624 ir_node *new_r_ASM(ir_node *block,
625 int arity, ir_node *in[], ir_asm_constraint *inputs,
626 int n_outs, ir_asm_constraint *outputs,
627 int n_clobber, ident *clobber[], ident *asm_text)
629 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
632 /** ********************/
633 /** public interfaces */
634 /** construction tools */
636 ir_node *new_d_Start(dbg_info *db)
640 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
641 op_Start, mode_T, 0, NULL);
643 res = optimize_node(res);
644 IRN_VRFY_IRG(res, current_ir_graph);
648 ir_node *new_d_End(dbg_info *db)
651 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
652 op_End, mode_X, -1, NULL);
653 res = optimize_node(res);
654 IRN_VRFY_IRG(res, current_ir_graph);
/* ***********************************************************************/
/* Methods necessary for automatic Phi node creation                      */
/*
   ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
   ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
   ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
   ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

   Call Graph:   ( A ---> B  ==  A "calls" B)

   In short: get_value and mature_immBlock both reach phi_merge via
   get_r_value_internal; phi_merge constructs Phi nodes through
   new_rd_Phi0 (immature blocks) and new_rd_Phi_in (mature blocks).
   (The original ASCII call-graph diagram was lost in this listing.)

 * *************************************************************************** */
685 /** Creates a Phi node with 0 predecessors. */
686 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
690 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
691 IRN_VRFY_IRG(res, irg);
/**
 * Internal constructor of a Phi node created by a phi_merge operation.
 *
 * NOTE(review): this listing is garbled -- original line numbers are fused
 * into the text and several statements are missing; compare with upstream
 * libFirm ircons.c before editing the logic below.
 *
 * @param irg    the graph on which the Phi will be constructed
 * @param block  the block in which the Phi will be constructed
 * @param mode   the mode of the Phi node
 * @param in     the input array of the phi node
 * @param ins    number of elements in the input array
 * @param phi0   if non-NULL: the Phi0 node in the same block that represents
 *               the value for which the new Phi is constructed
 */
707 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
708 ir_mode *mode, ir_node **in, int ins,
712 ir_node *res, *known;
714 /* Allocate a new node on the obstack. The allocation copies the in
716 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
717 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
719 /* This loop checks whether the Phi has more than one predecessor.
720 If so, it is a real Phi node and we break the loop. Else the
721 Phi node merges the same definition on several paths and therefore
723 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
725 for (i = ins - 1; i >= 0; --i) {
728 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
730 /* Optimize self referencing Phis: We can't detect them yet properly, as
731 they still refer to the Phi0 they will replace. So replace right now. */
732 if (phi0 && in[i] == phi0)
735 if (in[i] == res || in[i] == known)
744 /* i < 0: there is at most one predecessor, we don't need a phi node. */
747 edges_node_deleted(res, current_ir_graph);
748 obstack_free(current_ir_graph->obst, res);
750 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
751 order, an enclosing Phi node may become superfluous. */
752 res = optimize_in_place_2(known);
754 exchange(known, res);
759 /* An undefined value, e.g., in unreachable code. */
763 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
764 IRN_VRFY_IRG(res, irg);
765 /* Memory Phis in endless loops must be kept alive.
766 As we can't distinguish these easily we keep all of them alive. */
767 if (is_Phi(res) && mode == mode_M)
768 add_End_keepalive(get_irg_end(irg), res);
772 } /* new_rd_Phi_in */
774 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
776 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/**
 * Construct a new frag_array for node n.
 * Copy the content from the current graph_arr of the corresponding block:
 * this is the current state.
 * Set ProjM(n) as current memory state.
 * Further the last entry in frag_arr of current block points to n.  This
 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
 *
 * NOTE(review): this listing is garbled (fused line numbers; the declarations
 * of arr/opt, the opcode dispatch head, the set_optimize(opt) restore and the
 * final return are missing); compare with upstream libFirm before editing.
 */
786 static inline ir_node **new_frag_arr(ir_node *n)
791 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
792 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
793 sizeof(ir_node *)*current_ir_graph->n_loc);
795 /* turn off optimization before allocating Proj nodes, as res isn't
797 opt = get_opt_optimize(); set_optimize(0);
798 /* Here we rely on the fact that all frag ops have Memory as first result! */
800 arr[0] = new_Proj(n, mode_M, pn_Call_M);
801 } else if (is_CopyB(n)) {
802 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
804 assert((pn_Quot_M == pn_DivMod_M) &&
805 (pn_Quot_M == pn_Div_M) &&
806 (pn_Quot_M == pn_Mod_M) &&
807 (pn_Quot_M == pn_Load_M) &&
808 (pn_Quot_M == pn_Store_M) &&
809 (pn_Quot_M == pn_Alloc_M) &&
810 (pn_Quot_M == pn_Bound_M));
811 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
815 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/**
 * Returns the frag_arr from a node.
 *
 * NOTE(review): the case labels of the switch are missing in this garbled
 * listing; the attribute accesses suggest cases for Call, Alloc, Load, Store
 * and a default for other fragile ops -- confirm against upstream libFirm.
 */
822 static inline ir_node **get_frag_arr(ir_node *n)
824 switch (get_irn_opcode(n)) {
826 return n->attr.call.exc.frag_arr;
828 return n->attr.alloc.exc.frag_arr;
830 return n->attr.load.exc.frag_arr;
832 return n->attr.store.exc.frag_arr;
834 return n->attr.except.frag_arr;
/**
 * Records val at position pos in frag_arr and follows the chain of frag
 * arrays; the loop bound of 1024 guards against endless recursion (see the
 * asserts below).
 */
838 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
843 for (i = 1024; i >= 0; --i)
848 if (frag_arr[pos] == NULL)
850 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
851 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
852 assert(arr != frag_arr && "Endless recursion detected");
857 assert(!"potential endless recursion in set_frag_value");
858 } /* set_frag_value */
/**
 * Looks up (or constructs a Phi for) the value at position pos as seen after
 * the fragile operation cfOp, using cfOp's frag array.
 *
 * NOTE(review): garbled listing (fused line numbers, missing statements);
 * verify control flow against upstream libFirm ircons.c before editing.
 */
860 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
861 int pos, ir_mode *mode)
866 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
868 frag_arr = get_frag_arr(cfOp);
871 if (block->attr.block.graph_arr[pos] != NULL) {
872 /* There was a set_value() after the cfOp and no get_value() before that
873 set_value(). We must build a Phi node now. */
874 if (block->attr.block.is_matured) {
875 int ins = get_irn_arity(block);
877 NEW_ARR_A(ir_node *, nin, ins);
878 res = phi_merge(block, pos, mode, nin, ins);
880 res = new_rd_Phi0(current_ir_graph, block, mode);
881 res->attr.phi.u.pos = pos;
882 res->attr.phi.next = block->attr.block.phis;
883 block->attr.block.phis = res;
886 /* It's a Phi, we can write this into all graph_arrs with NULL */
887 set_frag_value(block->attr.block.graph_arr, pos, res);
889 res = get_r_value_internal(block, pos, mode);
890 set_frag_value(block->attr.block.graph_arr, pos, res);
894 } /* get_r_frag_value_internal */
/**
 * Check whether a control flow node cf_pred represents an exception flow.
 *
 * @param cf_pred     the control flow node
 * @param prev_cf_op  if cf_pred is a Proj, the predecessor node, else equal to cf_pred
 */
902 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
905 * Note: all projections from a Raise are "exceptional control flow", so we handle it
906 * like a normal Jmp, because there is no "regular" one.
907 * That's why Raise is no "fragile_op"!
909 if (is_fragile_op(prev_cf_op)) {
910 if (is_Proj(cf_pred)) {
911 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
912 /* the regular control flow, NO exception */
915 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
918 /* Hmm, exception but not a Proj? */
919 assert(!"unexpected condition: fragile op without a proj");
923 } /* is_exception_flow */
/**
 * Computes the predecessors for the real phi node, and then
 * allocates and returns this node.  The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 *
 * NOTE(review): garbled listing -- fused line numbers and missing statements
 * (loop variable declarations, several closing braces, the final return);
 * verify against upstream libFirm ircons.c before editing.
 */
931 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
933 ir_node *prevBlock, *res, *phi0, *phi0_all;
936 /* If this block has no value at pos create a Phi0 and remember it
937 in graph_arr to break recursions.
938 Else we may not set graph_arr as there a later value is remembered. */
940 if (block->attr.block.graph_arr[pos] == NULL) {
941 ir_graph *irg = current_ir_graph;
943 if (block == get_irg_start_block(irg)) {
944 /* Collapsing to Bad tarvals is no good idea.
945 So we call a user-supplied routine here that deals with this case as
946 appropriate for the given language. Sadly the only help we can give
947 here is the position.
949 Even if all variables are defined before use, it can happen that
950 we get to the start block, if a Cond has been replaced by a tuple
951 (bad, jmp). In this case we call the function needlessly, eventually
952 generating a non-existent error.
953 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
956 if (default_initialize_local_variable != NULL) {
957 ir_node *rem = get_cur_block();
959 set_cur_block(block);
960 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
964 block->attr.block.graph_arr[pos] = new_Unknown(mode);
965 /* We don't need to care about exception ops in the start block.
966 There are none by definition. */
967 return block->attr.block.graph_arr[pos];
969 phi0 = new_rd_Phi0(irg, block, mode);
970 block->attr.block.graph_arr[pos] = phi0;
971 if (get_opt_precise_exc_context()) {
972 /* Set graph_arr for fragile ops. Also here we should break recursion.
973 We could choose a cyclic path through a cfop. But the recursion would
974 break at some point. */
975 set_frag_value(block->attr.block.graph_arr, pos, phi0);
980 /* This loop goes to all predecessor blocks of the block the Phi node
981 is in and there finds the operands of the Phi node by calling
982 get_r_value_internal. */
983 for (i = 1; i <= ins; ++i) {
984 ir_node *cf_pred = block->in[i];
985 ir_node *prevCfOp = skip_Proj(cf_pred);
987 if (is_Bad(prevCfOp)) {
988 /* In case a Cond has been optimized we would get right to the start block
989 with an invalid definition. */
990 nin[i-1] = new_Bad();
993 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
995 if (!is_Bad(prevBlock)) {
996 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
997 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
998 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
1000 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
1002 nin[i-1] = new_Bad();
1006 /* We want to pass the Phi0 node to the constructor: this finds additional
1007 optimization possibilities.
1008 The Phi0 node either is allocated in this function, or it comes from
1009 a former call to get_r_value_internal(). In this case we may not yet
1010 exchange phi0, as this is done in mature_immBlock(). */
1012 phi0_all = block->attr.block.graph_arr[pos];
1013 if (! is_Phi0(phi0_all) ||
1014 get_irn_arity(phi0_all) != 0 ||
1015 get_nodes_block(phi0_all) != block)
1021 /* After collecting all predecessors into the array nin a new Phi node
1022 with these predecessors is created. This constructor contains an
1023 optimization: If all predecessors of the Phi node are identical it
1024 returns the only operand instead of a new Phi node. */
1025 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1027 /* In case we allocated a Phi0 node at the beginning of this procedure,
1028 we need to exchange this Phi0 with the real Phi. */
1030 exchange(phi0, res);
1031 block->attr.block.graph_arr[pos] = res;
1032 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1033 only an optimization. */
/**
 * This function returns the last definition of a value.  In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * NOTE(review): garbled listing (fused line numbers, missing statements such
 * as the declaration of res and an early return in case 2); verify against
 * upstream libFirm ircons.c before editing.
 *
 * @param block   the current block
 * @param pos     the value number of the value searched
 * @param mode    the mode of this value (needed for Phi construction)
 */
1049 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1052 /* There are 4 cases to treat.
1054 1. The block is not mature and we visit it the first time. We can not
1055 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1056 predecessors is returned. This node is added to the linked list (block
1057 attribute "phis") of the containing block to be completed when this block is
1058 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1061 2. The value is already known in this block, graph_arr[pos] is set and we
1062 visit the block the first time. We can return the value without
1063 creating any new nodes.
1065 3. The block is mature and we visit it the first time. A Phi node needs
1066 to be created (phi_merge). If the Phi is not needed, as all it's
1067 operands are the same value reaching the block through different
1068 paths, it's optimized away and the value itself is returned.
1070 4. The block is mature, and we visit it the second time. Now two
1071 subcases are possible:
1072 * The value was computed completely the last time we were here. This
1073 is the case if there is no loop. We can return the proper value.
1074 * The recursion that visited this node and set the flag did not
1075 return yet. We are computing a value in a loop and need to
1076 break the recursion. This case only happens if we visited
1077 the same block with phi_merge before, which inserted a Phi0.
1078 So we return the Phi0.
1081 /* case 4 -- already visited. */
1082 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1083 /* As phi_merge allocates a Phi0 this value is always defined. Here
1084 is the critical difference of the two algorithms. */
1085 assert(block->attr.block.graph_arr[pos]);
1086 return block->attr.block.graph_arr[pos];
1089 /* visited the first time */
1090 set_irn_visited(block, get_irg_visited(current_ir_graph));
1092 /* Get the local valid value */
1093 res = block->attr.block.graph_arr[pos];
1095 /* case 2 -- If the value is actually computed, return it. */
1099 if (block->attr.block.is_matured) { /* case 3 */
1101 /* The Phi has the same amount of ins as the corresponding block. */
1102 int ins = get_irn_arity(block);
1104 NEW_ARR_A(ir_node *, nin, ins);
1106 /* Phi merge collects the predecessors and then creates a node. */
1107 res = phi_merge(block, pos, mode, nin, ins);
1109 } else { /* case 1 */
1110 /* The block is not mature, we don't know how many in's are needed. A Phi
1111 with zero predecessors is created. Such a Phi node is called Phi0
1112 node. The Phi0 is then added to the list of Phi0 nodes in this block
1113 to be matured by mature_immBlock later.
1114 The Phi0 has to remember the pos of it's internal value. If the real
1115 Phi is computed, pos is used to update the array with the local
1117 res = new_rd_Phi0(current_ir_graph, block, mode);
1118 res->attr.phi.u.pos = pos;
1119 res->attr.phi.next = block->attr.block.phis;
1120 block->attr.block.phis = res;
1123 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1125 /* The local valid value is available now. */
1126 block->attr.block.graph_arr[pos] = res;
1129 } /* get_r_value_internal */
/* ************************************************************************** */

/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 *
 * NOTE(review): garbled listing (fused line numbers; declarations of ins,
 * nin, n, next and some closing braces are missing); verify against upstream
 * libFirm ircons.c before editing.
 */
1137 void mature_immBlock(ir_node *block)
1143 assert(is_Block(block));
1144 if (!get_Block_matured(block)) {
1145 ir_graph *irg = current_ir_graph;
1147 ins = ARR_LEN(block->in) - 1;
1148 /* Fix block parameters */
1149 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1151 /* An array for building the Phi nodes. */
1152 NEW_ARR_A(ir_node *, nin, ins);
1154 /* Traverse a chain of Phi nodes attached to this block and mature
1156 for (n = block->attr.block.phis; n; n = next) {
1157 inc_irg_visited(irg);
1158 next = n->attr.phi.next;
1159 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1162 block->attr.block.is_matured = 1;
1164 /* Now, as the block is a finished Firm node, we can optimize it.
1165 Since other nodes have been allocated since the block was created
1166 we can not free the node on the obstack. Therefore we have to call
1167 optimize_in_place().
1168 Unfortunately the optimization does not change a lot, as all allocated
1169 nodes refer to the unoptimized node.
1170 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1171 block = optimize_in_place_2(block);
1172 IRN_VRFY_IRG(block, irg);
1174 } /* mature_immBlock */
1176 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1178 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1181 ir_node *new_d_Const(dbg_info *db, tarval *con)
1183 return new_bd_Const(db, con);
1186 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1188 return new_bd_Const_long(db, mode, value);
1189 } /* new_d_Const_long */
1191 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1193 return new_bd_Const_type(db, con, tp);
1194 } /* new_d_Const_type */
1197 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1200 assert(arg->op == op_Cond);
1201 arg->attr.cond.default_proj = max_proj;
1202 res = new_d_Proj(db, arg, mode_X, max_proj);
1204 } /* new_d_defaultProj */
1207 * Allocate a frag array for a node if the current graph state is phase_building.
1209 * @param irn the node for which the frag array should be allocated
1210 * @param op the opcode of the (original) node, if does not match opcode of irn,
1212 * @param frag_store the address of the frag store in irn attributes, if this
1213 * address contains a value != NULL, does nothing
1215 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1217 if (get_opt_precise_exc_context()) {
1218 if ((current_ir_graph->phase_state == phase_building) &&
1219 (get_irn_op(irn) == op) && /* Could be optimized away. */
1220 !*frag_store) /* Could be a cse where the arr is already set. */ {
1221 *frag_store = new_frag_arr(irn);
1224 } /* firm_alloc_frag_arr */
1226 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1227 /* GL: objptr was called frame before. Frame was a bad choice for the name
1228 as the operand could as well be a pointer to a dynamic object. */
1230 return new_bd_Sel(db, current_ir_graph->current_block,
1231 store, objptr, 0, NULL, ent);
1232 } /* new_d_simpleSel */
1234 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1236 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1238 } /* new_d_SymConst_type */
1240 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1242 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1243 value, kind, firm_unknown_type);
1244 } /* new_d_SymConst */
1246 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1248 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1251 ir_node *new_d_EndReg(dbg_info *db)
1253 return new_bd_EndReg(db, current_ir_graph->current_block);
1254 } /* new_d_EndReg */
1256 ir_node *new_d_EndExcept(dbg_info *db)
1258 return new_bd_EndExcept(db, current_ir_graph->current_block);
1259 } /* new_d_EndExcept */
1262 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1263 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1264 ident *clobber[], ident *asm_text)
1266 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1269 /* ********************************************************************* */
1270 /* Comfortable interface with automatic Phi node construction. */
1271 /* (Also uses constructors of the ?? interface, except new_Block.) */
1272 /* ********************************************************************* */
1274 /* Block construction */
1275 /* immature Block without predecessors */
1276 ir_node *new_d_immBlock(dbg_info *db)
1280 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1281 /* creates a new dynamic in-array as length of in is -1 */
1282 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1284 /* macroblock head */
1287 res->attr.block.is_matured = 0;
1288 res->attr.block.is_dead = 0;
1289 res->attr.block.is_mb_head = 1;
1290 res->attr.block.irg.irg = current_ir_graph;
1291 res->attr.block.backedge = NULL;
1292 res->attr.block.in_cg = NULL;
1293 res->attr.block.cg_backedge = NULL;
1294 res->attr.block.extblk = NULL;
1295 res->attr.block.region = NULL;
1296 res->attr.block.mb_depth = 0;
1297 res->attr.block.entity = NULL;
1299 set_Block_block_visited(res, 0);
1301 /* Create and initialize array for Phi-node construction. */
1302 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1303 current_ir_graph->n_loc);
1304 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1306 /* Immature block may not be optimized! */
1307 IRN_VRFY_IRG(res, current_ir_graph);
1310 } /* new_d_immBlock */
1312 ir_node *new_immBlock(void)
1314 return new_d_immBlock(NULL);
1315 } /* new_immBlock */
1317 /* immature PartBlock with its predecessors */
1318 ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
1320 ir_node *res = new_d_immBlock(db);
1321 ir_node *blk = get_nodes_block(pred_jmp);
1323 res->in[0] = blk->in[0];
1324 assert(res->in[0] != NULL);
1325 add_immBlock_pred(res, pred_jmp);
1327 res->attr.block.is_mb_head = 0;
1328 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1331 } /* new_d_immPartBlock */
1333 ir_node *new_immPartBlock(ir_node *pred_jmp)
1335 return new_d_immPartBlock(NULL, pred_jmp);
1336 } /* new_immPartBlock */
1338 /* add an edge to a jmp/control flow node */
1339 void add_immBlock_pred(ir_node *block, ir_node *jmp)
1341 int n = ARR_LEN(block->in) - 1;
1343 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1344 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1345 assert(is_ir_node(jmp));
1347 ARR_APP1(ir_node *, block->in, jmp);
1349 hook_set_irn_n(block, n, jmp, NULL);
1350 } /* add_immBlock_pred */
1352 /* changing the current block */
1353 void set_cur_block(ir_node *target)
1355 current_ir_graph->current_block = target;
1356 } /* set_cur_block */
1358 /* ************************ */
1359 /* parameter administration */
1361 /* get a value from the parameter array from the current block by its index */
1362 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
1364 ir_graph *irg = current_ir_graph;
1365 assert(get_irg_phase_state(irg) == phase_building);
1366 inc_irg_visited(irg);
1371 return get_r_value_internal(irg->current_block, pos + 1, mode);
1374 /* get a value from the parameter array from the current block by its index */
1375 ir_node *get_value(int pos, ir_mode *mode)
1377 return get_d_value(NULL, pos, mode);
1380 /* set a value at position pos in the parameter array from the current block */
1381 void set_value(int pos, ir_node *value)
1383 ir_graph *irg = current_ir_graph;
1384 assert(get_irg_phase_state(irg) == phase_building);
1386 assert(pos+1 < irg->n_loc);
1387 assert(is_ir_node(value));
1388 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1391 /* Find the value number for a node in the current block.*/
1392 int find_value(ir_node *value)
1395 ir_node *bl = current_ir_graph->current_block;
1397 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1398 if (bl->attr.block.graph_arr[i] == value)
1403 /* get the current store */
1404 ir_node *get_store(void)
1406 ir_graph *irg = current_ir_graph;
1408 assert(get_irg_phase_state(irg) == phase_building);
1409 /* GL: one could call get_value instead */
1410 inc_irg_visited(irg);
1411 return get_r_value_internal(irg->current_block, 0, mode_M);
1414 /* set the current store: handles automatic Sync construction for Load nodes */
1415 void set_store(ir_node *store)
1417 ir_node *load, *pload, *pred, *in[2];
1419 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1420 /* Beware: due to dead code elimination, a store might become a Bad node even in
1421 the construction phase. */
1422 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1424 if (get_opt_auto_create_sync()) {
1425 /* handle non-volatile Load nodes by automatically creating Sync's */
1426 load = skip_Proj(store);
1427 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1428 pred = get_Load_mem(load);
1430 if (is_Sync(pred)) {
1431 /* a Load after a Sync: move it up */
1432 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1434 set_Load_mem(load, get_memop_mem(mem));
1435 add_Sync_pred(pred, store);
1438 pload = skip_Proj(pred);
1439 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1440 /* a Load after a Load: create a new Sync */
1441 set_Load_mem(load, get_Load_mem(pload));
1445 store = new_Sync(2, in);
1450 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1453 void keep_alive(ir_node *ka)
1455 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1458 /* --- Useful access routines --- */
1459 /* Returns the current block of the current graph. To set the current
1460 block use set_cur_block. */
1461 ir_node *get_cur_block(void)
1463 return get_irg_current_block(current_ir_graph);
1464 } /* get_cur_block */
1466 /* Returns the frame type of the current graph */
1467 ir_type *get_cur_frame_type(void)
1469 return get_irg_frame_type(current_ir_graph);
1470 } /* get_cur_frame_type */
1473 /* ********************************************************************* */
1476 /* call once for each run of the library */
1477 void firm_init_cons(uninitialized_local_variable_func_t *func)
1479 default_initialize_local_variable = func;
1480 } /* firm_init_cons */
1482 void irp_finalize_cons(void)
1485 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1486 irg_finalize_cons(get_irp_irg(i));
1488 irp->phase_state = phase_high;
1489 } /* irp_finalize_cons */
1491 ir_node *new_Start(void)
1493 return new_d_Start(NULL);
1495 ir_node *new_End(void)
1497 return new_d_End(NULL);
1499 ir_node *new_Const(tarval *con)
1501 return new_d_Const(NULL, con);
1504 ir_node *new_Const_long(ir_mode *mode, long value)
1506 return new_d_Const_long(NULL, mode, value);
1509 ir_node *new_Const_type(tarval *con, ir_type *tp)
1511 return new_d_Const_type(NULL, con, tp);
1514 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1516 return new_d_SymConst_type(NULL, mode, value, kind, type);
1518 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1520 return new_d_SymConst(NULL, mode, value, kind);
1522 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1524 return new_d_simpleSel(NULL, store, objptr, ent);
1526 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1528 return new_d_Phi(NULL, arity, in, mode);
1530 ir_node *new_Sync(int arity, ir_node *in[])
1532 return new_d_Sync(NULL, arity, in);
1534 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1536 return new_d_defaultProj(NULL, arg, max_proj);
1538 ir_node *new_Bad(void)
1540 return get_irg_bad(current_ir_graph);
1542 ir_node *new_EndReg(void)
1544 return new_d_EndReg(NULL);
1546 ir_node *new_EndExcept(void)
1548 return new_d_EndExcept(NULL);
1550 ir_node *new_NoMem(void)
1552 return get_irg_no_mem(current_ir_graph);
1554 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1555 int n_outs, ir_asm_constraint *outputs,
1556 int n_clobber, ident *clobber[], ident *asm_text)
1558 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1561 /* create a new anchor node */
1562 ir_node *new_Anchor(ir_graph *irg)
1564 ir_node *in[anchor_last];
1565 memset(in, 0, sizeof(in));
1566 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);