2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* when we need verifying */
49 # define IRN_VRFY_IRG(res, irg)
51 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
55 * Language dependent variable initialization callback.  Invoked from
 * phi_merge() when a value is requested in the start block before any
 * definition was recorded, i.e. on the use of an uninitialized local
 * variable; a frontend may install its own handler for this situation.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
59 /* creates a bd constructor for a binop */
60 #define NEW_BD_BINOP(instr) \
62 new_bd_##instr(dbg_info *db, ir_node *block, \
63 ir_node *op1, ir_node *op2, ir_mode *mode) \
67 ir_graph *irg = current_ir_graph; \
70 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
71 res = optimize_node(res); \
72 IRN_VRFY_IRG(res, irg); \
76 /* creates a bd constructor for an unop */
77 #define NEW_BD_UNOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op, ir_mode *mode) \
83 ir_graph *irg = current_ir_graph; \
84 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
85 res = optimize_node(res); \
86 IRN_VRFY_IRG(res, irg); \
90 /* creates a bd constructor for a divop */
91 #define NEW_BD_DIVOP(instr) \
93 new_bd_##instr(dbg_info *db, ir_node *block, \
94 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
98 ir_graph *irg = current_ir_graph; \
102 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
103 res->attr.divmod.exc.pin_state = state; \
104 res->attr.divmod.resmode = mode; \
105 res->attr.divmod.no_remainder = 0; \
106 res = optimize_node(res); \
107 IRN_VRFY_IRG(res, irg); \
111 /* creates a rd constructor for a binop */
112 #define NEW_RD_BINOP(instr) \
114 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
115 ir_node *op1, ir_node *op2, ir_mode *mode) \
118 ir_graph *rem = current_ir_graph; \
119 current_ir_graph = irg; \
120 res = new_bd_##instr(db, block, op1, op2, mode); \
121 current_ir_graph = rem; \
125 /* creates a rd constructor for an unop */
126 #define NEW_RD_UNOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op, mode); \
135 current_ir_graph = rem; \
139 /* creates a rd constructor for a divop */
140 #define NEW_RD_DIVOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
149 current_ir_graph = rem; \
153 /* creates a d constructor for a binop */
154 #define NEW_D_BINOP(instr) \
156 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
157 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
160 /* creates a d constructor for an unop */
161 #define NEW_D_UNOP(instr) \
163 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
164 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
167 #include "gen_ir_cons.c.inl"
170 new_bd_Start(dbg_info *db, ir_node *block) {
172 ir_graph *irg = current_ir_graph;
174 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
176 IRN_VRFY_IRG(res, irg);
181 new_bd_End(dbg_info *db, ir_node *block) {
183 ir_graph *irg = current_ir_graph;
185 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
187 IRN_VRFY_IRG(res, irg);
192 * Creates a Phi node with all predecessors. Calling this constructor
193 * is only allowed if the corresponding block is mature.
196 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
198 ir_graph *irg = current_ir_graph;
202 /* Don't assert that block matured: the use of this constructor is strongly
204 if (get_Block_matured(block))
205 assert(get_irn_arity(block) == arity);
207 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
209 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
211 for (i = arity - 1; i >= 0; --i)
212 if (is_Unknown(in[i])) {
217 if (!has_unknown) res = optimize_node(res);
218 IRN_VRFY_IRG(res, irg);
220 /* Memory Phis in endless loops must be kept alive.
221 As we can't distinguish these easily we keep all of them alive. */
222 if (is_Phi(res) && mode == mode_M)
223 add_End_keepalive(get_irg_end(irg), res);
228 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
230 ir_graph *irg = current_ir_graph;
232 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
233 res->attr.con.tv = con;
234 set_Const_type(res, tp); /* Call method because of complex assertion. */
235 res = optimize_node (res);
236 assert(get_Const_type(res) == tp);
237 IRN_VRFY_IRG(res, irg);
240 } /* new_bd_Const_type */
/* Constructs a Const node with the default type firm_unknown_type.
 * Delegates to new_rd_Const_type(); like all Const constructors here the
 * node ends up in the start block (see new_bd_Const_type above). */
243 new_bd_Const(dbg_info *db, tarval *con) {
244 ir_graph *irg = current_ir_graph;
246 return new_rd_Const_type (db, irg, con, firm_unknown_type);
/* Constructs a Const node from a plain long value: converts the value to
 * a tarval of the given mode first, then delegates to new_rd_Const(). */
250 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
251 ir_graph *irg = current_ir_graph;
253 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
254 } /* new_bd_Const_long */
257 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
260 ir_graph *irg = current_ir_graph;
262 assert(arg->op == op_Cond);
263 arg->attr.cond.kind = fragmentary;
264 arg->attr.cond.default_proj = max_proj;
265 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
267 } /* new_bd_defaultProj */
270 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
271 int arity, ir_node **in, ir_entity *ent) {
275 ir_graph *irg = current_ir_graph;
276 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
278 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
281 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
284 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
286 * Sel's can select functions which should be of mode mode_P_code.
288 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
289 res->attr.sel.entity = ent;
290 res = optimize_node(res);
291 IRN_VRFY_IRG(res, irg);
296 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
297 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
298 ir_graph *irg = current_ir_graph;
299 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
301 res->attr.symc.kind = symkind;
302 res->attr.symc.sym = value;
303 res->attr.symc.tp = tp;
305 res = optimize_node(res);
306 IRN_VRFY_IRG(res, irg);
308 } /* new_bd_SymConst_type */
311 new_bd_Sync(dbg_info *db, ir_node *block) {
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
316 /* no need to call optimize node here, Sync are always created with no predecessors */
317 IRN_VRFY_IRG(res, irg);
323 new_bd_EndReg(dbg_info *db, ir_node *block) {
325 ir_graph *irg = current_ir_graph;
327 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
328 set_irg_end_reg(irg, res);
329 IRN_VRFY_IRG(res, irg);
331 } /* new_bd_EndReg */
334 new_bd_EndExcept(dbg_info *db, ir_node *block) {
336 ir_graph *irg = current_ir_graph;
338 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
339 set_irg_end_except(irg, res);
340 IRN_VRFY_IRG (res, irg);
342 } /* new_bd_EndExcept */
345 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
346 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
348 ir_graph *irg = current_ir_graph;
350 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
351 res->attr.assem.pin_state = op_pin_state_pinned;
352 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
353 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
354 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
355 res->attr.assem.asm_text = asm_text;
357 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
358 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
359 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
366 /* --------------------------------------------- */
367 /* private interfaces, for professional use only */
368 /* --------------------------------------------- */
371 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
372 ir_graph *rem = current_ir_graph;
375 current_ir_graph = irg;
376 res = new_bd_Start(db, block);
377 current_ir_graph = rem;
383 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
385 ir_graph *rem = current_ir_graph;
387 current_ir_graph = irg;
388 res = new_bd_End(db, block);
389 current_ir_graph = rem;
394 /* Creates a Phi node with all predecessors. Calling this constructor
395 is only allowed if the corresponding block is mature. */
397 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
399 ir_graph *rem = current_ir_graph;
401 current_ir_graph = irg;
402 res = new_bd_Phi(db, block,arity, in, mode);
403 current_ir_graph = rem;
409 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
411 ir_graph *rem = current_ir_graph;
413 current_ir_graph = irg;
414 res = new_bd_Const_type(db, con, tp);
415 current_ir_graph = rem;
418 } /* new_rd_Const_type */
421 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
423 //#ifdef USE_ORIGINAL
424 ir_graph *rem = current_ir_graph;
426 current_ir_graph = irg;
427 res = new_bd_Const_type(db, con, firm_unknown_type);
428 current_ir_graph = rem;
430 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Constructs a Const node in irg from a plain long value; convenience
 * wrapper that first converts the value into a tarval of the given mode. */
437 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
438 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
439 } /* new_rd_Const_long */
442 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
445 ir_graph *rem = current_ir_graph;
447 current_ir_graph = irg;
448 res = new_bd_defaultProj(db, block, arg, max_proj);
449 current_ir_graph = rem;
452 } /* new_rd_defaultProj */
/* Constructs a Sel node that needs no index inputs: delegates to
 * new_bd_Sel() with arity 0 and a NULL index array, temporarily switching
 * current_ir_graph to irg around the call. */
455 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
456 ir_node *store, ir_node *objptr, ir_entity *ent) {
458 ir_graph *rem = current_ir_graph;
460 current_ir_graph = irg;
461 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
462 current_ir_graph = rem;
465 } /* new_rd_simpleSel */
468 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
469 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
471 ir_graph *rem = current_ir_graph;
473 current_ir_graph = irg;
474 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
475 current_ir_graph = rem;
478 } /* new_rd_SymConst_type */
/* Constructs a SymConst node without an explicit type: delegates to
 * new_rd_SymConst_type() using firm_unknown_type. */
481 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
482 symconst_symbol value, symconst_kind symkind) {
483 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
484 } /* new_rd_SymConst */
/* SymConst of kind symconst_addr_ent (symbol is an entity whose address
 * is taken); the node is placed into the start block of irg. */
486 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
488 sym.entity_p = symbol;
489 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
490 } /* new_rd_SymConst_addr_ent */
/* SymConst of kind symconst_ofs_ent (symbol is an entity whose offset is
 * requested); the node is placed into the start block of irg. */
492 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
494 sym.entity_p = symbol;
495 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
496 } /* new_rd_SymConst_ofs_ent */
/* SymConst of kind symconst_addr_name (symbol is known only by its ident,
 * not as an entity); the node is placed into the start block of irg. */
498 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
500 sym.ident_p = symbol;
501 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
502 } /* new_rd_SymConst_addr_name */
/* SymConst of kind symconst_type_tag for the given type; the node is
 * placed into the start block of irg. */
504 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
507 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
508 } /* new_rd_SymConst_type_tag */
/* SymConst of kind symconst_type_size (the size of the given type); the
 * node is placed into the start block of irg. */
510 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
513 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
514 } /* new_rd_SymConst_size */
/* SymConst of kind symconst_type_align (the alignment of the given type);
 * the node is placed into the start block of irg. */
516 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
519 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
520 } /* new_rd_SymConst_align */
523 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
525 ir_graph *rem = current_ir_graph;
528 current_ir_graph = irg;
529 res = new_bd_Sync(db, block);
530 current_ir_graph = rem;
532 for (i = 0; i < arity; ++i)
533 add_Sync_pred(res, in[i]);
539 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
542 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
543 set_irg_end_reg(irg, res);
544 IRN_VRFY_IRG(res, irg);
546 } /* new_rd_EndReg */
549 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
552 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
553 set_irg_end_except(irg, res);
554 IRN_VRFY_IRG (res, irg);
556 } /* new_rd_EndExcept */
558 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
559 int arity, ir_node *in[], ir_asm_constraint *inputs,
560 int n_outs, ir_asm_constraint *outputs,
561 int n_clobber, ident *clobber[], ident *asm_text) {
563 ir_graph *rem = current_ir_graph;
565 current_ir_graph = irg;
566 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
567 current_ir_graph = rem;
/* ------------------------------------------------------------------- */
/* new_r_* constructors: identical to the new_rd_* variants but without */
/* debug info -- each simply forwards with a NULL dbg_info.             */
/* ------------------------------------------------------------------- */
572 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
573 return new_rd_Start(NULL, irg, block);
575 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
576 return new_rd_End(NULL, irg, block);
578 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
579 return new_rd_Const(NULL, irg, con);
581 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
582 return new_rd_Const_long(NULL, irg, mode, value);
584 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
585 return new_rd_Const_type(NULL, irg, con, tp);
587 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
588 symconst_symbol value, symconst_kind symkind) {
589 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
591 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
592 ir_node *objptr, ir_entity *ent) {
593 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
595 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
596 ir_node **in, ir_mode *mode) {
597 return new_rd_Phi(NULL, irg, block, arity, in, mode);
599 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
600 return new_rd_Sync(NULL, irg, block, arity, in);
602 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
604 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
/* Bad and NoMem do not allocate: they hand out the node stored in the
 * graph (get_irg_bad / get_irg_no_mem). */
606 ir_node *new_r_Bad(ir_graph *irg) {
607 return get_irg_bad(irg);
609 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
610 return new_rd_EndReg(NULL, irg, block);
612 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
613 return new_rd_EndExcept(NULL, irg, block);
615 ir_node *new_r_NoMem(ir_graph *irg) {
616 return get_irg_no_mem(irg);
618 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
619 int arity, ir_node *in[], ir_asm_constraint *inputs,
620 int n_outs, ir_asm_constraint *outputs,
621 int n_clobber, ident *clobber[], ident *asm_text) {
622 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
625 /** ********************/
626 /** public interfaces */
627 /** construction tools */
630 new_d_Start(dbg_info *db) {
633 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
634 op_Start, mode_T, 0, NULL);
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, current_ir_graph);
642 new_d_End(dbg_info *db) {
644 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
645 op_End, mode_X, -1, NULL);
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, current_ir_graph);
652 /* ***********************************************************************/
653 /* Methods necessary for automatic Phi node creation */
655 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
656 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
657 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
658 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
660 Call Graph: ( A ---> B == A "calls" B)
662 get_value mature_immBlock
670 get_r_value_internal |
674 new_rd_Phi0 new_rd_Phi_in
676 * *************************************************************************** */
678 /** Creates a Phi node with 0 predecessors. */
679 static inline ir_node *
680 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
683 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
684 IRN_VRFY_IRG(res, irg);
690 * Internal constructor of a Phi node by a phi_merge operation.
692 * @param irg the graph on which the Phi will be constructed
693 * @param block the block in which the Phi will be constructed
694 * @param mode the mode of the Phi node
695 * @param in the input array of the phi node
696 * @param ins number of elements in the input array
697 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
698 * the value for which the new Phi is constructed
700 static inline ir_node *
701 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
702 ir_node **in, int ins, ir_node *phi0) {
704 ir_node *res, *known;
706 /* Allocate a new node on the obstack. The allocation copies the in
708 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
709 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
711 /* This loop checks whether the Phi has more than one predecessor.
712 If so, it is a real Phi node and we break the loop. Else the
713 Phi node merges the same definition on several paths and therefore
715 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
717 for (i = ins - 1; i >= 0; --i) {
720 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
722 /* Optimize self referencing Phis: We can't detect them yet properly, as
723 they still refer to the Phi0 they will replace. So replace right now. */
724 if (phi0 && in[i] == phi0)
727 if (in[i] == res || in[i] == known)
736 /* i < 0: there is at most one predecessor, we don't need a phi node. */
739 edges_node_deleted(res, current_ir_graph);
740 obstack_free(current_ir_graph->obst, res);
742 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
743 order, an enclosing Phi node may become superfluous. */
744 res = optimize_in_place_2(known);
746 exchange(known, res);
751 /* A undefined value, e.g., in unreachable code. */
755 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
756 IRN_VRFY_IRG(res, irg);
757 /* Memory Phis in endless loops must be kept alive.
758 As we can't distinguish these easily we keep all of them alive. */
759 if (is_Phi(res) && mode == mode_M)
760 add_End_keepalive(get_irg_end(irg), res);
764 } /* new_rd_Phi_in */
767 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
769 #if PRECISE_EXC_CONTEXT
771 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
774 * Construct a new frag_array for node n.
775 * Copy the content from the current graph_arr of the corresponding block:
776 * this is the current state.
777 * Set ProjM(n) as current memory state.
778 * Further the last entry in frag_arr of current block points to n. This
779 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
781 static inline ir_node **new_frag_arr(ir_node *n) {
785 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
786 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
787 sizeof(ir_node *)*current_ir_graph->n_loc);
789 /* turn off optimization before allocating Proj nodes, as res isn't
791 opt = get_opt_optimize(); set_optimize(0);
792 /* Here we rely on the fact that all frag ops have Memory as first result! */
794 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
795 } else if (is_CopyB(n)) {
796 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
798 assert((pn_Quot_M == pn_DivMod_M) &&
799 (pn_Quot_M == pn_Div_M) &&
800 (pn_Quot_M == pn_Mod_M) &&
801 (pn_Quot_M == pn_Load_M) &&
802 (pn_Quot_M == pn_Store_M) &&
803 (pn_Quot_M == pn_Alloc_M) &&
804 (pn_Quot_M == pn_Bound_M));
805 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
809 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
814 * Returns the frag_arr from a node.
816 static inline ir_node **get_frag_arr(ir_node *n) {
817 switch (get_irn_opcode(n)) {
819 return n->attr.call.exc.frag_arr;
821 return n->attr.alloc.exc.frag_arr;
823 return n->attr.load.exc.frag_arr;
825 return n->attr.store.exc.frag_arr;
827 return n->attr.except.frag_arr;
832 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
836 for (i = 1024; i >= 0; --i)
841 if (frag_arr[pos] == NULL)
843 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
844 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
845 assert(arr != frag_arr && "Endless recursion detected");
850 assert(!"potential endless recursion in set_frag_value");
851 } /* set_frag_value */
854 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
858 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
860 frag_arr = get_frag_arr(cfOp);
863 if (block->attr.block.graph_arr[pos] != NULL) {
864 /* There was a set_value() after the cfOp and no get_value() before that
865 set_value(). We must build a Phi node now. */
866 if (block->attr.block.is_matured) {
867 int ins = get_irn_arity(block);
869 NEW_ARR_A(ir_node *, nin, ins);
870 res = phi_merge(block, pos, mode, nin, ins);
872 res = new_rd_Phi0(current_ir_graph, block, mode);
873 res->attr.phi.u.pos = pos;
874 res->attr.phi.next = block->attr.block.phis;
875 block->attr.block.phis = res;
878 /* It's a Phi, we can write this into all graph_arrs with NULL */
879 set_frag_value(block->attr.block.graph_arr, pos, res);
881 res = get_r_value_internal(block, pos, mode);
882 set_frag_value(block->attr.block.graph_arr, pos, res);
886 } /* get_r_frag_value_internal */
887 #endif /* PRECISE_EXC_CONTEXT */
890 * Check whether a control flownode cf_pred represents an exception flow.
892 * @param cf_pred the control flow node
893 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
895 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
897 * Note: all projections from a raise are "exceptional control flow", which we
898 * handle like a normal Jmp, because there is no "regular" one.
899 * That's why Raise is no "fragile_op"!
901 if (is_fragile_op(prev_cf_op)) {
902 if (is_Proj(cf_pred)) {
903 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
904 /* the regular control flow, NO exception */
907 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
910 /* Hmm, exception but not a Proj? */
911 assert(!"unexpected condition: fragile op without a proj");
915 } /* is_exception_flow */
918 * Computes the predecessors for the real phi node, and then
919 * allocates and returns this node. The routine called to allocate the
920 * node might optimize it away and return a real value.
921 * This function must be called with an in-array of proper size.
924 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
925 ir_node *prevBlock, *res, *phi0, *phi0_all;
928 /* If this block has no value at pos create a Phi0 and remember it
929 in graph_arr to break recursions.
930 Else we may not set graph_arr as there a later value is remembered. */
932 if (block->attr.block.graph_arr[pos] == NULL) {
933 ir_graph *irg = current_ir_graph;
935 if (block == get_irg_start_block(irg)) {
936 /* Collapsing to Bad tarvals is no good idea.
937 So we call a user-supplied routine here that deals with this case as
938 appropriate for the given language. Sorrily the only help we can give
939 here is the position.
941 Even if all variables are defined before use, it can happen that
942 we get to the start block, if a Cond has been replaced by a tuple
943 (bad, jmp). In this case we call the function needlessly, eventually
944 generating a non-existent error.
945 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
948 if (default_initialize_local_variable != NULL) {
949 ir_node *rem = get_cur_block();
951 set_cur_block(block);
952 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
956 block->attr.block.graph_arr[pos] = new_Unknown(mode);
957 /* We don't need to care about exception ops in the start block.
958 There are none by definition. */
959 return block->attr.block.graph_arr[pos];
961 phi0 = new_rd_Phi0(irg, block, mode);
962 block->attr.block.graph_arr[pos] = phi0;
963 #if PRECISE_EXC_CONTEXT
964 if (get_opt_precise_exc_context()) {
965 /* Set graph_arr for fragile ops. Also here we should break recursion.
966 We could choose a cyclic path through an cfop. But the recursion would
967 break at some point. */
968 set_frag_value(block->attr.block.graph_arr, pos, phi0);
974 /* This loop goes to all predecessor blocks of the block the Phi node
975 is in and there finds the operands of the Phi node by calling
976 get_r_value_internal. */
977 for (i = 1; i <= ins; ++i) {
978 ir_node *cf_pred = block->in[i];
979 ir_node *prevCfOp = skip_Proj(cf_pred);
981 if (is_Bad(prevCfOp)) {
982 /* In case a Cond has been optimized we would get right to the start block
983 with an invalid definition. */
984 nin[i-1] = new_Bad();
987 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
989 if (!is_Bad(prevBlock)) {
990 #if PRECISE_EXC_CONTEXT
991 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
992 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
993 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
996 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
998 nin[i-1] = new_Bad();
1002 /* We want to pass the Phi0 node to the constructor: this finds additional
1003 optimization possibilities.
1004 The Phi0 node either is allocated in this function, or it comes from
1005 a former call to get_r_value_internal(). In this case we may not yet
1006 exchange phi0, as this is done in mature_immBlock(). */
1008 phi0_all = block->attr.block.graph_arr[pos];
1009 if (! is_Phi0(phi0_all) ||
1010 get_irn_arity(phi0_all) != 0 ||
1011 get_nodes_block(phi0_all) != block)
1017 /* After collecting all predecessors into the array nin a new Phi node
1018 with these predecessors is created. This constructor contains an
1019 optimization: If all predecessors of the Phi node are identical it
1020 returns the only operand instead of a new Phi node. */
1021 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
1023 /* In case we allocated a Phi0 node at the beginning of this procedure,
1024 we need to exchange this Phi0 with the real Phi. */
1026 exchange(phi0, res);
1027 block->attr.block.graph_arr[pos] = res;
1028 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1029 only an optimization. */
1036 * This function returns the last definition of a value. In case
1037 * this value was last defined in a previous block, Phi nodes are
1038 * inserted. If the part of the firm graph containing the definition
1039 * is not yet constructed, a dummy Phi node is returned.
1041 * @param block the current block
1042 * @param pos the value number of the value searched
1043 * @param mode the mode of this value (needed for Phi construction)
1046 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
1048 /* There are 4 cases to treat.
1050 1. The block is not mature and we visit it the first time. We can not
1051 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1052 predecessors is returned. This node is added to the linked list (block
1053 attribute "phis") of the containing block to be completed when this block is
1054 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1057 2. The value is already known in this block, graph_arr[pos] is set and we
1058 visit the block the first time. We can return the value without
1059 creating any new nodes.
1061 3. The block is mature and we visit it the first time. A Phi node needs
1062 to be created (phi_merge). If the Phi is not needed, as all it's
1063 operands are the same value reaching the block through different
1064 paths, it's optimized away and the value itself is returned.
1066 4. The block is mature, and we visit it the second time. Now two
1067 subcases are possible:
1068 * The value was computed completely the last time we were here. This
1069 is the case if there is no loop. We can return the proper value.
1070 * The recursion that visited this node and set the flag did not
1071 return yet. We are computing a value in a loop and need to
1072 break the recursion. This case only happens if we visited
1073 the same block with phi_merge before, which inserted a Phi0.
1074 So we return the Phi0.
1077 /* case 4 -- already visited. */
1078 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1079 /* As phi_merge allocates a Phi0 this value is always defined. Here
1080 is the critical difference of the two algorithms. */
1081 assert(block->attr.block.graph_arr[pos]);
1082 return block->attr.block.graph_arr[pos];
1085 /* visited the first time */
1086 set_irn_visited(block, get_irg_visited(current_ir_graph));
1088 /* Get the local valid value */
1089 res = block->attr.block.graph_arr[pos];
1091 /* case 2 -- If the value is actually computed, return it. */
1095 if (block->attr.block.is_matured) { /* case 3 */
1097 /* The Phi has the same amount of ins as the corresponding block. */
1098 int ins = get_irn_arity(block);
1100 NEW_ARR_A(ir_node *, nin, ins);
1102 /* Phi merge collects the predecessors and then creates a node. */
1103 res = phi_merge(block, pos, mode, nin, ins);
1105 } else { /* case 1 */
1106 /* The block is not mature, we don't know how many in's are needed. A Phi
1107 with zero predecessors is created. Such a Phi node is called Phi0
1108 node. The Phi0 is then added to the list of Phi0 nodes in this block
1109 to be matured by mature_immBlock later.
1110 The Phi0 has to remember the pos of it's internal value. If the real
1111 Phi is computed, pos is used to update the array with the local
1113 res = new_rd_Phi0(current_ir_graph, block, mode);
1114 res->attr.phi.u.pos = pos;
1115 res->attr.phi.next = block->attr.block.phis;
1116 block->attr.block.phis = res;
1119 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1121 /* The local valid value is available now. */
1122 block->attr.block.graph_arr[pos] = res;
1125 } /* get_r_value_internal */
1127 /* ************************************************************************** */
1130 * Finalize a Block node, when all control flows are known.
1131 * Acceptable parameters are only Block nodes.
/* Finalize a Block node once all control-flow predecessors are known:
   fixes the backedge array and matures every Phi0 placeholder queued on
   the block via phi_merge().  Acceptable parameters are only Block nodes.
   NOTE(review): the declarations of ins/nin/n/next are not visible in this
   excerpt — confirm against the full source. */
mature_immBlock(ir_node *block) {
	assert(is_Block(block));
	if (!get_Block_matured(block)) {
		ir_graph *irg = current_ir_graph;
		/* in[0] holds the block itself, so the predecessor count is len-1. */
		ins = ARR_LEN(block->in) - 1;
		/* Fix block parameters */
		block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
		/* An array for building the Phi nodes. */
		NEW_ARR_A(ir_node *, nin, ins);
		/* Traverse the chain of Phi0 nodes attached to this block and mature
		   them: phi_merge() collects all predecessors and replaces the Phi0. */
		for (n = block->attr.block.phis; n; n = next) {
			inc_irg_visited(irg);
			next = n->attr.phi.next;
			exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
		/* Mark matured before optimizing, so verification sees a finished block. */
		block->attr.block.is_matured = 1;
		/* Now, as the block is a finished Firm node, we can optimize it.
		   Since other nodes have been allocated since the block was created
		   we can not free the node on the obstack. Therefore we have to call
		   optimize_in_place().
		   Unfortunately the optimization does not change a lot, as all allocated
		   nodes refer to the unoptimized node.
		   We can call optimize_in_place_2(), as global cse has no effect on blocks. */
		block = optimize_in_place_2(block);
		IRN_VRFY_IRG(block, irg);
} /* mature_immBlock */
/* Construct a Phi node in the current block (variant with debug info). */
new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
	return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
/* Construct a Const node from a tarval (variant with debug info). */
new_d_Const(dbg_info *db, tarval *con) {
	return new_bd_Const(db, con);
/* Construct a Const node from a (mode, long) pair. */
new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
	return new_bd_Const_long(db, mode, value);
} /* new_d_Const_long */
/* Construct a Const node with an explicit source type attached. */
new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
	return new_bd_Const_type(db, con, tp);
} /* new_d_Const_type */
/* Construct the default Proj (mode_X) of a Cond node: marks the Cond as
   fragmentary and records max_proj as its default projection number. */
new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
	/* Only Cond nodes have a default projection. */
	assert(arg->op == op_Cond);
	arg->attr.cond.kind = fragmentary;
	arg->attr.cond.default_proj = max_proj;
	res = new_d_Proj(db, arg, mode_X, max_proj);
} /* new_d_defaultProj */
1204 * Allocate a frag array for a node if the current graph state is phase_building.
1206 * @param irn the node for which the frag array should be allocated
1207 * @param op the opcode of the (original) node, if does not match opcode of irn,
1209 * @param frag_store the address of the frag store in irn attributes, if this
1210 * address contains a value != NULL, does nothing
/* Allocate a frag array for irn if the graph is in phase_building and
   precise exception contexts are enabled; does nothing when *frag_store is
   already set or irn's opcode no longer matches op (node was optimized). */
void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
	if (get_opt_precise_exc_context()) {
		if ((current_ir_graph->phase_state == phase_building) &&
		    (get_irn_op(irn) == op) && /* Could be optimized away. */
		    !*frag_store) /* Could be a cse where the arr is already set. */ {
			*frag_store = new_frag_arr(irn);
} /* firm_alloc_frag_arr */
/* Construct a Sel node without array indices (simple entity selection)
   in the current block. */
new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
/* GL: objptr was called frame before. Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
	return new_bd_Sel(db, current_ir_graph->current_block,
	                  store, objptr, 0, NULL, ent);
} /* new_d_simpleSel */
/* Construct a typed SymConst; SymConsts are always placed in the start
   block of the current graph. */
new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
	return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
} /* new_d_SymConst_type */
/* Construct an untyped SymConst: type defaults to firm_unknown_type. */
new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
	return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
	                            value, kind, firm_unknown_type);
} /* new_d_SymConst */
/* Construct a Sync node in the current block. */
new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
	return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* Construct an EndReg node in the current block. */
new_d_EndReg(dbg_info *db) {
	return new_bd_EndReg(db, current_ir_graph->current_block);
} /* new_d_EndReg */
/* Construct an EndExcept node in the current block. */
new_d_EndExcept(dbg_info *db) {
	return new_bd_EndExcept(db, current_ir_graph->current_block);
} /* new_d_EndExcept */
/* Construct an ASM (inline assembler) node in the current block. */
new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
          int n_outs, ir_asm_constraint *outputs,
          int n_clobber, ident *clobber[], ident *asm_text) {
	return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1266 /* ********************************************************************* */
1267 /* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of ?? interface, except new_Block.) */
1269 /* ********************************************************************* */
1271 /* Block construction */
1272 /* immature Block without predecessors */
/* Construct an immature Block without predecessors.  Predecessors are added
   later with add_immBlock_pred() and the block is finished by
   mature_immBlock().  Only legal while the graph is in phase_building.
   NOTE(review): the declaration of res is not visible in this excerpt. */
new_d_immBlock(dbg_info *db) {
	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
	/* macroblock head */
	/* Initialize all block attributes explicitly. */
	res->attr.block.is_matured = 0;
	res->attr.block.is_dead = 0;
	res->attr.block.is_mb_head = 1;
	res->attr.block.has_label = 0;
	res->attr.block.irg = current_ir_graph;
	res->attr.block.backedge = NULL;
	res->attr.block.in_cg = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk = NULL;
	res->attr.block.region = NULL;
	res->attr.block.mb_depth = 0;
	res->attr.block.label = 0;
	set_Block_block_visited(res, 0);
	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
	                                      current_ir_graph->n_loc);
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
	/* Immature block may not be optimized! */
	IRN_VRFY_IRG(res, current_ir_graph);
} /* new_d_immBlock */
/* Convenience wrapper: immature block without debug information. */
new_immBlock(void) {
	return new_d_immBlock(NULL);
} /* new_immBlock */
1315 /* immature PartBlock with its predecessors */
/* Construct an immature partial block that continues the macroblock of
   pred_jmp's block, with pred_jmp as its first predecessor. */
new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
	ir_node *res = new_d_immBlock(db);
	ir_node *blk = get_nodes_block(pred_jmp);
	/* Inherit the macroblock header from the predecessor's block. */
	res->in[0] = blk->in[0];
	assert(res->in[0] != NULL);
	add_immBlock_pred(res, pred_jmp);
	/* A part block is never a macroblock head; it nests one level deeper. */
	res->attr.block.is_mb_head = 0;
	res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
} /* new_d_immPartBlock */
/* Convenience wrapper: immature part block without debug information. */
new_immPartBlock(ir_node *pred_jmp) {
	return new_d_immPartBlock(NULL, pred_jmp);
} /* new_immPartBlock */
1336 /* add an edge to a jmp/control flow node */
/* Append jmp as a control-flow predecessor of an immature block.
   Only valid before the block is matured, and only on macroblock heads. */
add_immBlock_pred(ir_node *block, ir_node *jmp) {
	/* in[0] is the block itself, so the predecessor count is len-1. */
	int n = ARR_LEN(block->in) - 1;
	assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
	assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
	assert(is_ir_node(jmp));
	ARR_APP1(ir_node *, block->in, jmp);
	/* Notify the hook mechanism about the new edge. */
	hook_set_irn_n(block, n, jmp, NULL);
} /* add_immBlock_pred */
1350 /* changing the current block */
/* Make target the block that newly constructed nodes are placed in. */
set_cur_block(ir_node *target) {
	current_ir_graph->current_block = target;
} /* set_cur_block */
1356 /* ************************ */
1357 /* parameter administration */
1359 /* get a value from the parameter array from the current block by its index */
/* Read the local value at index pos from the current block, triggering
   automatic Phi construction if necessary (variant with debug info). */
get_d_value(dbg_info *db, int pos, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	inc_irg_visited(irg);
	/* pos + 1: slot 0 of the value array holds the memory state. */
	return get_r_value_internal(irg->current_block, pos + 1, mode);
/* get a value from the parameter array from the current block by its index */
get_value(int pos, ir_mode *mode) {
	return get_d_value(NULL, pos, mode);
1378 /* set a value at position pos in the parameter array from the current block */
/* Write value into slot pos of the current block's local value array. */
set_value(int pos, ir_node *value) {
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	/* pos + 1: slot 0 is reserved for the memory state. */
	assert(pos+1 < irg->n_loc);
	assert(is_ir_node(value));
	irg->current_block->attr.block.graph_arr[pos + 1] = value;
1389 /* Find the value number for a node in the current block.*/
/* Find the value number for a node in the current block by scanning the
   local value array backwards; slot 0 (memory state) is skipped.
   NOTE(review): the declaration of i and the return statements are not
   visible in this excerpt. */
find_value(ir_node *value) {
	ir_node *bl = current_ir_graph->current_block;
	for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
		if (bl->attr.block.graph_arr[i] == value)
/* get the current store */
/* Returns the current memory state of the current block, triggering
   automatic Phi construction if necessary.
   NOTE(review): the function signature line is not visible in this excerpt. */
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	/* GL: one could call get_value instead */
	inc_irg_visited(irg);
	/* Slot 0 of the value array holds the memory state. */
	return get_r_value_internal(irg->current_block, 0, mode_M);
1412 /* set the current store: handles automatic Sync construction for Load nodes */
/* Set the current memory state of the current block.  When the auto-sync
   option is enabled, consecutive non-volatile Loads are rewritten so their
   memory inputs are merged through Sync nodes. */
set_store(ir_node *store) {
	ir_node *load, *pload, *pred, *in[2];
	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);
			if (is_Sync(pred)) {
				/* a Load after a Sync: move it up */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);
			pload = skip_Proj(pred);
			if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
				/* a Load after a Load: create a new Sync */
				set_Load_mem(load, get_Load_mem(pload));
				store = new_Sync(2, in);
	/* Slot 0 of the value array is the block's memory state. */
	current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Protect a node from removal by adding it to the End node's keep-alive list. */
keep_alive(ir_node *ka) {
	add_End_keepalive(get_irg_end(current_ir_graph), ka);
1456 /* --- Useful access routines --- */
1457 /* Returns the current block of the current graph. To set the current
1458 block use set_cur_block. */
1459 ir_node *get_cur_block(void) {
1460 return get_irg_current_block(current_ir_graph);
1461 } /* get_cur_block */
1463 /* Returns the frame type of the current graph */
1464 ir_type *get_cur_frame_type(void) {
1465 return get_irg_frame_type(current_ir_graph);
1466 } /* get_cur_frame_type */
1469 /* ********************************************************************* */
/* call once for each run of the library */
/* Registers the language-dependent callback used to initialize otherwise
   uninitialized local variables during SSA construction. */
firm_init_cons(uninitialized_local_variable_func_t *func) {
	default_initialize_local_variable = func;
} /* firm_init_cons */
/* Finalize construction of all graphs in the program and move the program
   to phase_high.
   NOTE(review): the declaration of i is not visible in this excerpt. */
irp_finalize_cons(void) {
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		irg_finalize_cons(get_irp_irg(i));
	irp->phase_state = phase_high;
} /* irp_finalize_cons */
/* --- Convenience constructors without debug information: each forwards to
   its new_d_* counterpart with a NULL dbg_info. --- */
ir_node *new_Start(void) {
	return new_d_Start(NULL);
ir_node *new_End(void) {
	return new_d_End(NULL);
ir_node *new_Const(tarval *con) {
	return new_d_Const(NULL, con);
ir_node *new_Const_long(ir_mode *mode, long value) {
	return new_d_Const_long(NULL, mode, value);
ir_node *new_Const_type(tarval *con, ir_type *tp) {
	return new_d_Const_type(NULL, con, tp);
ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
	return new_d_SymConst_type(NULL, mode, value, kind, type);
ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
	return new_d_SymConst(NULL, mode, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
	return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
	return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Sync(int arity, ir_node *in[]) {
	return new_d_Sync(NULL, arity, in);
ir_node *new_defaultProj(ir_node *arg, long max_proj) {
	return new_d_defaultProj(NULL, arg, max_proj);
/* Bad is unique per graph: return the graph's singleton node. */
ir_node *new_Bad(void) {
	return get_irg_bad(current_ir_graph);
ir_node *new_EndReg(void) {
	return new_d_EndReg(NULL);
ir_node *new_EndExcept(void) {
	return new_d_EndExcept(NULL);
/* NoMem is unique per graph: return the graph's singleton node. */
ir_node *new_NoMem(void) {
	return get_irg_no_mem(current_ir_graph);
ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
                 int n_outs, ir_asm_constraint *outputs,
                 int n_clobber, ident *clobber[], ident *asm_text) {
	return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1541 /* create a new anchor node */
1542 ir_node *new_Anchor(ir_graph *irg) {
1543 ir_node *in[anchor_last];
1544 memset(in, 0, sizeof(in));
1545 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);