3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
29 #include "irgraph_t.h"
33 #include "firm_common_t.h"
39 #include "irbackedge_t.h"
41 #include "iredges_t.h"
44 #if USE_EXPLICIT_PHI_IN_STACK
45 /* A stack needed for the automatic Phi node construction in constructor
46 Phi_in. Redefinition in irgraph.c!! */
51 typedef struct Phi_in_stack Phi_in_stack;
54 /* when we need verifying */
56 # define IRN_VRFY_IRG(res, irg)
58 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
62 * Language dependent variable initialization callback.
64 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* creates a bd constructor for a binop */
67 #define NEW_BD_BINOP(instr) \
69 new_bd_##instr(dbg_info *db, ir_node *block, \
70 ir_node *op1, ir_node *op2, ir_mode *mode) \
74 ir_graph *irg = current_ir_graph; \
77 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
78 res = optimize_node(res); \
79 IRN_VRFY_IRG(res, irg); \
83 /* creates a bd constructor for an unop */
84 #define NEW_BD_UNOP(instr) \
86 new_bd_##instr(dbg_info *db, ir_node *block, \
87 ir_node *op, ir_mode *mode) \
90 ir_graph *irg = current_ir_graph; \
91 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
92 res = optimize_node(res); \
93 IRN_VRFY_IRG(res, irg); \
97 /* creates a bd constructor for an divop */
98 #define NEW_BD_DIVOP(instr) \
100 new_bd_##instr(dbg_info *db, ir_node *block, \
101 ir_node *memop, ir_node *op1, ir_node *op2) \
105 ir_graph *irg = current_ir_graph; \
109 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
110 res = optimize_node(res); \
111 IRN_VRFY_IRG(res, irg); \
115 /* creates a rd constructor for a binop */
116 #define NEW_RD_BINOP(instr) \
118 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
119 ir_node *op1, ir_node *op2, ir_mode *mode) \
122 ir_graph *rem = current_ir_graph; \
123 current_ir_graph = irg; \
124 res = new_bd_##instr(db, block, op1, op2, mode); \
125 current_ir_graph = rem; \
129 /* creates a rd constructor for an unop */
130 #define NEW_RD_UNOP(instr) \
132 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
133 ir_node *op, ir_mode *mode) \
136 ir_graph *rem = current_ir_graph; \
137 current_ir_graph = irg; \
138 res = new_bd_##instr(db, block, op, mode); \
139 current_ir_graph = rem; \
143 /* creates a rd constructor for an divop */
144 #define NEW_RD_DIVOP(instr) \
146 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
147 ir_node *memop, ir_node *op1, ir_node *op2) \
150 ir_graph *rem = current_ir_graph; \
151 current_ir_graph = irg; \
152 res = new_bd_##instr(db, block, memop, op1, op2); \
153 current_ir_graph = rem; \
157 /* creates a d constructor for an binop */
158 #define NEW_D_BINOP(instr) \
160 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
161 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
164 /* creates a d constructor for an unop */
165 #define NEW_D_UNOP(instr) \
167 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
168 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
173 * Constructs a Block with a fixed number of predecessors.
174 * Does not set current_block. Can not be used with automatic
175 * Phi node construction.
/* Builds a Block node with a fixed number of control-flow predecessors
   on current_ir_graph.  Does not set current_block, so it cannot be
   used together with automatic Phi node construction. */
178 new_bd_Block(dbg_info *db, int arity, ir_node **in)
181   ir_graph *irg = current_ir_graph;
183   res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are known up front, so the block is mature immediately. */
184   set_Block_matured(res, 1);
185   set_Block_block_visited(res, 0);
187   /* res->attr.block.exc = exc_normal; */
188   /* res->attr.block.handler_entry = 0; */
/* Initialize the block attributes; backedge flags sized to arity,
   interprocedural (cg) fields start out unset. */
189   res->attr.block.dead = 0;
190   res->attr.block.irg = irg;
191   res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
192   res->attr.block.in_cg = NULL;
193   res->attr.block.cg_backedge = NULL;
194   res->attr.block.extblk = NULL;
196   IRN_VRFY_IRG(res, irg);
201 new_bd_Start(dbg_info *db, ir_node *block)
204 ir_graph *irg = current_ir_graph;
206 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
207 /* res->attr.start.irg = irg; */
209 IRN_VRFY_IRG(res, irg);
214 new_bd_End(dbg_info *db, ir_node *block)
217 ir_graph *irg = current_ir_graph;
219 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
221 IRN_VRFY_IRG(res, irg);
226 * Creates a Phi node with all predecessors. Calling this constructor
227 * is only allowed if the corresponding block is mature.
/* Creates a Phi node with all predecessors given.  Only allowed when the
   corresponding block is mature (then the arity must match the block's). */
230 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
233   ir_graph *irg = current_ir_graph;
237   /* Don't assert that block matured: the use of this constructor is strongly
239   if ( get_Block_matured(block) )
240     assert( get_irn_arity(block) == arity );
242   res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
244   res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; an Unknown operand suppresses
   optimization of the fresh Phi below. */
246   for (i = arity-1; i >= 0; i--)
247     if (get_irn_op(in[i]) == op_Unknown) {
252   if (!has_unknown) res = optimize_node (res);
253   IRN_VRFY_IRG(res, irg);
255   /* Memory Phis in endless loops must be kept alive.
256      As we can't distinguish these easily we keep all of them alive. */
257   if ((res->op == op_Phi) && (mode == mode_M))
258     add_End_keepalive(get_irg_end(irg), res);
/* Builds a typed Const node for tarval con.
   NOTE: the `block` parameter is ignored — Const nodes are always placed
   in the start block of the graph (see the new_ir_node call below). */
263 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
266   ir_graph *irg = current_ir_graph;
268   res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
269   res->attr.con.tv = con;
270   set_Const_type(res, tp); /* Call method because of complex assertion. */
271   res = optimize_node (res);
/* Optimization (CSE) must hand back a Const of the same type. */
272   assert(get_Const_type(res) == tp);
273   IRN_VRFY_IRG(res, irg);
276 } /* new_bd_Const_type */
279 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
281 ir_graph *irg = current_ir_graph;
283 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
287 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value)
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
292 } /* new_bd_Const_long */
295 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
298 ir_graph *irg = current_ir_graph;
300 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
301 res = optimize_node(res);
302 IRN_VRFY_IRG(res, irg);
307 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
/* Builds the default Proj (mode_X) of a Cond node.
   WARNING: this mutates the Cond argument itself — it switches the Cond
   to `fragmentary` kind and records max_proj as its default projection. */
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332   ir_graph *irg = current_ir_graph;
334   assert(arg->op == op_Cond);
/* Side effect on the predecessor Cond, not just on the new Proj. */
335   arg->attr.cond.kind = fragmentary;
336   arg->attr.cond.default_proj = max_proj;
337   res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
355 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in)
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
402 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Jmp(dbg_info *db, ir_node *block)
419 ir_graph *irg = current_ir_graph;
421 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
422 res = optimize_node (res);
423 IRN_VRFY_IRG (res, irg);
428 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt)
431 ir_graph *irg = current_ir_graph;
433 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
434 res = optimize_node (res);
435 IRN_VRFY_IRG (res, irg);
437 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
443 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c)
446 ir_graph *irg = current_ir_graph;
448 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
449 res->attr.cond.kind = dense;
450 res->attr.cond.default_proj = 0;
451 res->attr.cond.pred = COND_JMP_PRED_NONE;
452 res = optimize_node (res);
453 IRN_VRFY_IRG(res, irg);
458 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
459 ir_node *callee, int arity, ir_node **in, ir_type *tp)
464 ir_graph *irg = current_ir_graph;
467 NEW_ARR_A(ir_node *, r_in, r_arity);
470 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
472 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
474 assert((get_unknown_type() == tp) || is_Method_type(tp));
475 set_Call_type(res, tp);
476 res->attr.call.exc.pin_state = op_pin_state_pinned;
477 res->attr.call.callee_arr = NULL;
478 res = optimize_node(res);
479 IRN_VRFY_IRG(res, irg);
484 new_bd_Return (dbg_info *db, ir_node *block,
485 ir_node *store, int arity, ir_node **in)
490 ir_graph *irg = current_ir_graph;
493 NEW_ARR_A (ir_node *, r_in, r_arity);
495 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
496 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
497 res = optimize_node(res);
498 IRN_VRFY_IRG(res, irg);
503 new_bd_Load(dbg_info *db, ir_node *block,
504 ir_node *store, ir_node *adr, ir_mode *mode)
508 ir_graph *irg = current_ir_graph;
512 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
513 res->attr.load.exc.pin_state = op_pin_state_pinned;
514 res->attr.load.load_mode = mode;
515 res->attr.load.volatility = volatility_non_volatile;
516 res = optimize_node(res);
517 IRN_VRFY_IRG(res, irg);
522 new_bd_Store(dbg_info *db, ir_node *block,
523 ir_node *store, ir_node *adr, ir_node *val)
527 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
533 res->attr.store.exc.pin_state = op_pin_state_pinned;
534 res->attr.store.volatility = volatility_non_volatile;
535 res = optimize_node(res);
536 IRN_VRFY_IRG(res, irg);
541 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
542 ir_node *size, ir_type *alloc_type, where_alloc where)
546 ir_graph *irg = current_ir_graph;
550 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
551 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
552 res->attr.alloc.where = where;
553 res->attr.alloc.type = alloc_type;
554 res = optimize_node(res);
555 IRN_VRFY_IRG(res, irg);
560 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
561 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
565 ir_graph *irg = current_ir_graph;
570 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
571 res->attr.free.where = where;
572 res->attr.free.type = free_type;
573 res = optimize_node(res);
574 IRN_VRFY_IRG(res, irg);
/* Builds a Sel node selecting entity ent from objptr, with `arity`
   additional index operands.  The real in-array is store, objptr,
   then the indices (r_arity = arity + 2, built on the C stack). */
579 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
580        int arity, ir_node **in, entity *ent)
585   ir_graph *irg = current_ir_graph;
587   assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
590   NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
593   memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
595    * FIXME: Sel's can select functions which should be of mode mode_P_code.
597   res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
598   res->attr.sel.ent = ent;
599   res = optimize_node(res);
600   IRN_VRFY_IRG(res, irg);
605 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
606 symconst_kind symkind, ir_type *tp) {
609 ir_graph *irg = current_ir_graph;
611 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
612 mode = mode_P_data; /* FIXME: can be mode_P_code */
616 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
618 res->attr.symc.num = symkind;
619 res->attr.symc.sym = value;
620 res->attr.symc.tp = tp;
622 res = optimize_node(res);
623 IRN_VRFY_IRG(res, irg);
625 } /* new_bd_SymConst_type */
628 new_bd_SymConst(dbg_info *db, ir_node *block, symconst_symbol value,
629 symconst_kind symkind)
631 ir_graph *irg = current_ir_graph;
633 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
635 } /* new_bd_SymConst */
638 new_bd_Sync(dbg_info *db, ir_node *block)
641 ir_graph *irg = current_ir_graph;
643 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
644 /* no need to call optimize node here, Sync are always created with no predecessors */
645 IRN_VRFY_IRG(res, irg);
650 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
652 ir_node *in[2], *res;
653 ir_graph *irg = current_ir_graph;
657 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
658 res->attr.confirm_cmp = cmp;
659 res = optimize_node (res);
660 IRN_VRFY_IRG(res, irg);
664 /* this function is often called with current_ir_graph unset */
/* Builds an Unknown node of mode m, pinned to the start block.
   NOTE(review): the comment above claims current_ir_graph is often
   unset here, yet irg is read and dereferenced below — verify callers
   always set current_ir_graph first, or this dereferences NULL. */
666 new_bd_Unknown(ir_mode *m)
669   ir_graph *irg = current_ir_graph;
671   res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
672   res = optimize_node(res);
674 } /* new_bd_Unknown */
677 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call)
681 ir_graph *irg = current_ir_graph;
683 in[0] = get_Call_ptr(call);
684 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
685 /* res->attr.callbegin.irg = irg; */
686 res->attr.callbegin.call = call;
687 res = optimize_node(res);
688 IRN_VRFY_IRG(res, irg);
690 } /* new_bd_CallBegin */
693 new_bd_EndReg(dbg_info *db, ir_node *block)
696 ir_graph *irg = current_ir_graph;
698 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
699 set_irg_end_reg(irg, res);
700 IRN_VRFY_IRG(res, irg);
702 } /* new_bd_EndReg */
705 new_bd_EndExcept(dbg_info *db, ir_node *block)
708 ir_graph *irg = current_ir_graph;
710 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
711 set_irg_end_except(irg, res);
712 IRN_VRFY_IRG (res, irg);
714 } /* new_bd_EndExcept */
717 new_bd_Break(dbg_info *db, ir_node *block)
720 ir_graph *irg = current_ir_graph;
722 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
723 res = optimize_node(res);
724 IRN_VRFY_IRG(res, irg);
729 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
733 ir_graph *irg = current_ir_graph;
735 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
736 res->attr.filter.proj = proj;
737 res->attr.filter.in_cg = NULL;
738 res->attr.filter.backedge = NULL;
741 assert(get_Proj_pred(res));
742 assert(get_nodes_block(get_Proj_pred(res)));
744 res = optimize_node(res);
745 IRN_VRFY_IRG(res, irg);
747 } /* new_bd_Filter */
750 new_bd_Mux(dbg_info *db, ir_node *block,
751 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
755 ir_graph *irg = current_ir_graph;
761 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
764 res = optimize_node(res);
765 IRN_VRFY_IRG(res, irg);
770 new_bd_Psi(dbg_info *db, ir_node *block,
771 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
775 ir_graph *irg = current_ir_graph;
778 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
780 for (i = 0; i < arity; ++i) {
782 in[2 * i + 1] = vals[i];
786 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
789 res = optimize_node(res);
790 IRN_VRFY_IRG(res, irg);
795 new_bd_CopyB(dbg_info *db, ir_node *block,
796 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
800 ir_graph *irg = current_ir_graph;
806 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
808 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
809 res->attr.copyb.data_type = data_type;
810 res = optimize_node(res);
811 IRN_VRFY_IRG(res, irg);
816 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
817 ir_node *objptr, ir_type *type)
821 ir_graph *irg = current_ir_graph;
825 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
826 res->attr.instof.type = type;
827 res = optimize_node(res);
828 IRN_VRFY_IRG(res, irg);
830 } /* new_bd_InstOf */
833 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
837 ir_graph *irg = current_ir_graph;
841 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
842 res = optimize_node(res);
843 IRN_VRFY_IRG(res, irg);
848 new_bd_Bound(dbg_info *db, ir_node *block,
849 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
853 ir_graph *irg = current_ir_graph;
859 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
860 res->attr.bound.exc.pin_state = op_pin_state_pinned;
861 res = optimize_node(res);
862 IRN_VRFY_IRG(res, irg);
867 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node)
870 ir_graph *irg = current_ir_graph;
872 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
873 res = optimize_node(res);
874 IRN_VRFY_IRG(res, irg);
878 /* --------------------------------------------- */
879 /* private interfaces, for professional use only */
880 /* --------------------------------------------- */
882 /* Constructs a Block with a fixed number of predecessors.
883 Does not set current_block. Can not be used with automatic
884 Phi node construction. */
886 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in)
888 ir_graph *rem = current_ir_graph;
891 current_ir_graph = irg;
892 res = new_bd_Block(db, arity, in);
893 current_ir_graph = rem;
899 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
901 ir_graph *rem = current_ir_graph;
904 current_ir_graph = irg;
905 res = new_bd_Start(db, block);
906 current_ir_graph = rem;
912 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
915 ir_graph *rem = current_ir_graph;
917 current_ir_graph = rem;
918 res = new_bd_End(db, block);
919 current_ir_graph = rem;
924 /* Creates a Phi node with all predecessors. Calling this constructor
925 is only allowed if the corresponding block is mature. */
927 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
930 ir_graph *rem = current_ir_graph;
932 current_ir_graph = irg;
933 res = new_bd_Phi(db, block,arity, in, mode);
934 current_ir_graph = rem;
940 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
943 ir_graph *rem = current_ir_graph;
945 current_ir_graph = irg;
946 res = new_bd_Const_type(db, block, mode, con, tp);
947 current_ir_graph = rem;
950 } /* new_rd_Const_type */
953 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
956 ir_graph *rem = current_ir_graph;
958 current_ir_graph = irg;
959 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
960 current_ir_graph = rem;
966 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
968 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
969 } /* new_rd_Const_long */
972 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
975 ir_graph *rem = current_ir_graph;
977 current_ir_graph = irg;
978 res = new_bd_Id(db, block, val, mode);
979 current_ir_graph = rem;
985 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
989 ir_graph *rem = current_ir_graph;
991 current_ir_graph = irg;
992 res = new_bd_Proj(db, block, arg, mode, proj);
993 current_ir_graph = rem;
999 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_defaultProj(db, block, arg, max_proj);
1007 current_ir_graph = rem;
1010 } /* new_rd_defaultProj */
1013 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1016 ir_graph *rem = current_ir_graph;
1018 current_ir_graph = irg;
1019 res = new_bd_Conv(db, block, op, mode, 0);
1020 current_ir_graph = rem;
1026 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1029 ir_graph *rem = current_ir_graph;
1031 current_ir_graph = irg;
1032 res = new_bd_Cast(db, block, op, to_tp);
1033 current_ir_graph = rem;
1039 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_Tuple(db, block, arity, in);
1046 current_ir_graph = rem;
1049 } /* new_rd_Tuple */
1056 NEW_RD_DIVOP(DivMod)
1069 NEW_RD_BINOP(Borrow)
1072 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1073 ir_node *op1, ir_node *op2)
1076 ir_graph *rem = current_ir_graph;
1078 current_ir_graph = irg;
1079 res = new_bd_Cmp(db, block, op1, op2);
1080 current_ir_graph = rem;
1086 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block)
1089 ir_graph *rem = current_ir_graph;
1091 current_ir_graph = irg;
1092 res = new_bd_Jmp(db, block);
1093 current_ir_graph = rem;
1099 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1102 ir_graph *rem = current_ir_graph;
1104 current_ir_graph = irg;
1105 res = new_bd_IJmp(db, block, tgt);
1106 current_ir_graph = rem;
1112 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1115 ir_graph *rem = current_ir_graph;
1117 current_ir_graph = irg;
1118 res = new_bd_Cond(db, block, c);
1119 current_ir_graph = rem;
1125 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1126 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1129 ir_graph *rem = current_ir_graph;
1131 current_ir_graph = irg;
1132 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1133 current_ir_graph = rem;
1139 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1140 ir_node *store, int arity, ir_node **in)
1143 ir_graph *rem = current_ir_graph;
1145 current_ir_graph = irg;
1146 res = new_bd_Return(db, block, store, arity, in);
1147 current_ir_graph = rem;
1150 } /* new_rd_Return */
1153 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1154 ir_node *store, ir_node *adr, ir_mode *mode)
1157 ir_graph *rem = current_ir_graph;
1159 current_ir_graph = irg;
1160 res = new_bd_Load(db, block, store, adr, mode);
1161 current_ir_graph = rem;
1167 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1168 ir_node *store, ir_node *adr, ir_node *val)
1171 ir_graph *rem = current_ir_graph;
1173 current_ir_graph = irg;
1174 res = new_bd_Store(db, block, store, adr, val);
1175 current_ir_graph = rem;
1178 } /* new_rd_Store */
1181 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1182 ir_node *size, ir_type *alloc_type, where_alloc where)
1185 ir_graph *rem = current_ir_graph;
1187 current_ir_graph = irg;
1188 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1189 current_ir_graph = rem;
1192 } /* new_rd_Alloc */
1195 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1196 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1199 ir_graph *rem = current_ir_graph;
1201 current_ir_graph = irg;
1202 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1203 current_ir_graph = rem;
1209 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1210 ir_node *store, ir_node *objptr, entity *ent)
1213 ir_graph *rem = current_ir_graph;
1215 current_ir_graph = irg;
1216 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1217 current_ir_graph = rem;
1220 } /* new_rd_simpleSel */
1223 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1224 int arity, ir_node **in, entity *ent)
1227 ir_graph *rem = current_ir_graph;
1229 current_ir_graph = irg;
1230 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1231 current_ir_graph = rem;
1237 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1238 symconst_kind symkind, ir_type *tp)
1241 ir_graph *rem = current_ir_graph;
1243 current_ir_graph = irg;
1244 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1245 current_ir_graph = rem;
1248 } /* new_rd_SymConst_type */
1251 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1252 symconst_kind symkind)
1254 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1256 } /* new_rd_SymConst */
1258 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1260 symconst_symbol sym = {(ir_type *)symbol};
1261 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1262 } /* new_rd_SymConst_addr_ent */
1264 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1265 symconst_symbol sym = {(ir_type *)symbol};
1266 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1267 } /* new_rd_SymConst_addr_name */
1269 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1270 symconst_symbol sym = {symbol};
1271 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1272 } /* new_rd_SymConst_type_tag */
1274 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1275 symconst_symbol sym = {symbol};
1276 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1277 } /* new_rd_SymConst_size */
1279 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1280 symconst_symbol sym = {symbol};
1281 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1282 } /* new_rd_SymConst_align */
/* Constructs a Sync node in graph irg with `arity` memory predecessors.
   The Sync is first created empty (new_bd_Sync takes no predecessors),
   then the inputs are attached one by one via add_Sync_pred. */
1285 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1288   ir_graph *rem = current_ir_graph;
/* Standard rd-wrapper pattern: switch to irg, build, restore. */
1291   current_ir_graph = irg;
1292   res = new_bd_Sync(db, block);
1293   current_ir_graph = rem;
/* Predecessors are added after the switch back; add_Sync_pred works on
   the node itself, not on current_ir_graph. */
1295   for (i = 0; i < arity; ++i)
1296     add_Sync_pred(res, in[i]);
1302 new_rd_Bad(ir_graph *irg) {
1303 return get_irg_bad(irg);
1307 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1310 ir_graph *rem = current_ir_graph;
1312 current_ir_graph = irg;
1313 res = new_bd_Confirm(db, block, val, bound, cmp);
1314 current_ir_graph = rem;
1317 } /* new_rd_Confirm */
1319 /* this function is often called with current_ir_graph unset */
1321 new_rd_Unknown(ir_graph *irg, ir_mode *m)
1324 ir_graph *rem = current_ir_graph;
1326 current_ir_graph = irg;
1327 res = new_bd_Unknown(m);
1328 current_ir_graph = rem;
1331 } /* new_rd_Unknown */
1334 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1337 ir_graph *rem = current_ir_graph;
1339 current_ir_graph = irg;
1340 res = new_bd_CallBegin(db, block, call);
1341 current_ir_graph = rem;
1344 } /* new_rd_CallBegin */
1347 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
1351 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1352 set_irg_end_reg(irg, res);
1353 IRN_VRFY_IRG(res, irg);
1355 } /* new_rd_EndReg */
1358 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
1362 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1363 set_irg_end_except(irg, res);
1364 IRN_VRFY_IRG (res, irg);
1366 } /* new_rd_EndExcept */
1369 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block)
1372 ir_graph *rem = current_ir_graph;
1374 current_ir_graph = irg;
1375 res = new_bd_Break(db, block);
1376 current_ir_graph = rem;
1379 } /* new_rd_Break */
1382 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1386 ir_graph *rem = current_ir_graph;
1388 current_ir_graph = irg;
1389 res = new_bd_Filter(db, block, arg, mode, proj);
1390 current_ir_graph = rem;
1393 } /* new_rd_Filter */
1396 new_rd_NoMem(ir_graph *irg) {
1397 return get_irg_no_mem(irg);
1398 } /* new_rd_NoMem */
1401 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1402 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1405 ir_graph *rem = current_ir_graph;
1407 current_ir_graph = irg;
1408 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1409 current_ir_graph = rem;
1415 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1416 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1419 ir_graph *rem = current_ir_graph;
1421 current_ir_graph = irg;
1422 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1423 current_ir_graph = rem;
1428 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1429 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1432 ir_graph *rem = current_ir_graph;
1434 current_ir_graph = irg;
1435 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1436 current_ir_graph = rem;
1439 } /* new_rd_CopyB */
1442 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1443 ir_node *objptr, ir_type *type)
1446 ir_graph *rem = current_ir_graph;
1448 current_ir_graph = irg;
1449 res = new_bd_InstOf(db, block, store, objptr, type);
1450 current_ir_graph = rem;
1453 } /* new_rd_InstOf */
1456 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1459 ir_graph *rem = current_ir_graph;
1461 current_ir_graph = irg;
1462 res = new_bd_Raise(db, block, store, obj);
1463 current_ir_graph = rem;
1466 } /* new_rd_Raise */
1468 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1469 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1472 ir_graph *rem = current_ir_graph;
1474 current_ir_graph = irg;
1475 res = new_bd_Bound(db, block, store, idx, lower, upper);
1476 current_ir_graph = rem;
1479 } /* new_rd_Bound */
1481 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node)
1484 ir_graph *rem = current_ir_graph;
1486 current_ir_graph = irg;
1487 res = new_bd_Pin(db, block, node);
1488 current_ir_graph = rem;
1493 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1494 return new_rd_Block(NULL, irg, arity, in);
1496 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1497 return new_rd_Start(NULL, irg, block);
1499 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1500 return new_rd_End(NULL, irg, block);
1502 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1503 return new_rd_Jmp(NULL, irg, block);
1505 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1506 return new_rd_IJmp(NULL, irg, block, tgt);
1508 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1509 return new_rd_Cond(NULL, irg, block, c);
1511 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1512 ir_node *store, int arity, ir_node **in) {
1513 return new_rd_Return(NULL, irg, block, store, arity, in);
1515 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1516 ir_mode *mode, tarval *con) {
1517 return new_rd_Const(NULL, irg, block, mode, con);
1519 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1520 ir_mode *mode, long value) {
1521 return new_rd_Const_long(NULL, irg, block, mode, value);
1523 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1524 ir_mode *mode, tarval *con, ir_type *tp) {
1525 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1527 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1528 symconst_symbol value, symconst_kind symkind) {
1529 return new_rd_SymConst(NULL, irg, block, value, symkind);
1531 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1532 ir_node *objptr, entity *ent) {
1533 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1535 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1536 ir_node *objptr, int n_index, ir_node **index,
1538 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1540 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1541 ir_node *callee, int arity, ir_node **in,
1543 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1545 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1546 ir_node *op1, ir_node *op2, ir_mode *mode) {
1547 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1549 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2, ir_mode *mode) {
1551 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1553 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1554 ir_node *op, ir_mode *mode) {
1555 return new_rd_Minus(NULL, irg, block, op, mode);
1557 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1558 ir_node *op1, ir_node *op2, ir_mode *mode) {
1559 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1561 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1562 ir_node *memop, ir_node *op1, ir_node *op2) {
1563 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1565 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1566 ir_node *memop, ir_node *op1, ir_node *op2) {
1567 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1569 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1570 ir_node *memop, ir_node *op1, ir_node *op2) {
1571 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1573 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1574 ir_node *memop, ir_node *op1, ir_node *op2) {
1575 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1577 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1578 ir_node *op, ir_mode *mode) {
1579 return new_rd_Abs(NULL, irg, block, op, mode);
1581 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1582 ir_node *op1, ir_node *op2, ir_mode *mode) {
1583 return new_rd_And(NULL, irg, block, op1, op2, mode);
1585 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1586 ir_node *op1, ir_node *op2, ir_mode *mode) {
1587 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1589 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1590 ir_node *op1, ir_node *op2, ir_mode *mode) {
1591 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1593 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1594 ir_node *op, ir_mode *mode) {
1595 return new_rd_Not(NULL, irg, block, op, mode);
1597 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1598 ir_node *op, ir_node *k, ir_mode *mode) {
1599 return new_rd_Shl(NULL, irg, block, op, k, mode);
1601 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1602 ir_node *op, ir_node *k, ir_mode *mode) {
1603 return new_rd_Shr(NULL, irg, block, op, k, mode);
1605 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1606 ir_node *op, ir_node *k, ir_mode *mode) {
1607 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1609 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1610 ir_node *op, ir_node *k, ir_mode *mode) {
1611 return new_rd_Rot(NULL, irg, block, op, k, mode);
1613 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1614 ir_node *op, ir_node *k, ir_mode *mode) {
1615 return new_rd_Carry(NULL, irg, block, op, k, mode);
1617 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1618 ir_node *op, ir_node *k, ir_mode *mode) {
1619 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1621 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1622 ir_node *op1, ir_node *op2) {
1623 return new_rd_Cmp(NULL, irg, block, op1, op2);
1625 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1626 ir_node *op, ir_mode *mode) {
1627 return new_rd_Conv(NULL, irg, block, op, mode);
1629 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1630 return new_rd_Cast(NULL, irg, block, op, to_tp);
1632 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1633 ir_node **in, ir_mode *mode) {
1634 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1636 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1637 ir_node *store, ir_node *adr, ir_mode *mode) {
1638 return new_rd_Load(NULL, irg, block, store, adr, mode);
1640 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1641 ir_node *store, ir_node *adr, ir_node *val) {
1642 return new_rd_Store(NULL, irg, block, store, adr, val);
1644 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1645 ir_node *size, ir_type *alloc_type, where_alloc where) {
1646 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1648 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1649 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1650 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1652 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1653 return new_rd_Sync(NULL, irg, block, arity, in);
1655 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1656 ir_mode *mode, long proj) {
1657 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1659 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1661 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1663 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1664 int arity, ir_node **in) {
1665 return new_rd_Tuple(NULL, irg, block, arity, in );
1667 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1668 ir_node *val, ir_mode *mode) {
1669 return new_rd_Id(NULL, irg, block, val, mode);
1671 ir_node *new_r_Bad (ir_graph *irg) {
1672 return new_rd_Bad(irg);
1674 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1675 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1677 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1678 return new_rd_Unknown(irg, m);
1680 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1681 return new_rd_CallBegin(NULL, irg, block, callee);
1683 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1684 return new_rd_EndReg(NULL, irg, block);
1686 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1687 return new_rd_EndExcept(NULL, irg, block);
1689 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1690 return new_rd_Break(NULL, irg, block);
1692 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1693 ir_mode *mode, long proj) {
1694 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1696 ir_node *new_r_NoMem (ir_graph *irg) {
1697 return new_rd_NoMem(irg);
1699 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1700 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1701 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1703 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1704 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1705 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1707 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1708 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1709 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1711 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1713 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1715 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1716 ir_node *store, ir_node *obj) {
1717 return new_rd_Raise(NULL, irg, block, store, obj);
1719 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1720 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1721 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1723 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1724 return new_rd_Pin(NULL, irg, block, node);
1727 /** ********************/
1728 /** public interfaces */
1729 /** construction tools */
1733 * - create a new Start node in the current block
1735 * @return s - pointer to the created Start node
1740 new_d_Start(dbg_info *db)
1744 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1745 op_Start, mode_T, 0, NULL);
1746 /* res->attr.start.irg = current_ir_graph; */
1748 res = optimize_node(res);
1749 IRN_VRFY_IRG(res, current_ir_graph);
1754 new_d_End(dbg_info *db)
1757 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1758 op_End, mode_X, -1, NULL);
1759 res = optimize_node(res);
1760 IRN_VRFY_IRG(res, current_ir_graph);
1765 /* Constructs a Block with a fixed number of predecessors.
1766 Does set current_block. Can be used with automatic Phi
1767 node construction. */
1769 new_d_Block(dbg_info *db, int arity, ir_node **in)
1773 int has_unknown = 0;
1775 res = new_bd_Block(db, arity, in);
/* While still building the graph, attach the per-block value array
   (graph_arr) used by get_value/set_value for SSA construction; one
   slot per local variable (n_loc), all initially NULL. */
1777 /* Create and initialize array for Phi-node construction. */
1778 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1779 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1780 current_ir_graph->n_loc);
1781 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; the loop body that sets
   has_unknown is not visible in this excerpt -- TODO confirm. */
1784 for (i = arity-1; i >= 0; i--)
1785 if (get_irn_op(in[i]) == op_Unknown) {
/* Blocks with Unknown predecessors must not be optimized away yet. */
1790 if (!has_unknown) res = optimize_node(res);
/* Make this block the implicit target for subsequent new_d_* calls. */
1791 current_ir_graph->current_block = res;
1793 IRN_VRFY_IRG(res, current_ir_graph);
1798 /* ***********************************************************************/
1799 /* Methods necessary for automatic Phi node creation */
1801 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1802 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1803 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1804 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1806 Call Graph: ( A ---> B == A "calls" B)
1808 get_value mature_immBlock
1816 get_r_value_internal |
1820 new_rd_Phi0 new_rd_Phi_in
1822 * *************************************************************************** */
1824 /** Creates a Phi node with 0 predecessors. */
/* A Phi0 is a placeholder for a value that reaches an immature block;
   it is completed (or turned into an Id) when the block is matured.
   Note: it is deliberately NOT optimized, only verified. */
1825 static INLINE ir_node *
1826 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
1830 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1831 IRN_VRFY_IRG(res, irg);
1835 /* There are two implementations of the Phi node construction. The first
1836 is faster, but does not work for blocks with more than 2 predecessors.
1837 The second works always but is slower and causes more unnecessary Phi
1839 Select the implementations by the following preprocessor flag set in
1841 #if USE_FAST_PHI_CONSTRUCTION
1843 /* This is a stack used for allocating and deallocating nodes in
1844 new_rd_Phi_in. The original implementation used the obstack
1845 to model this stack, now it is explicit. This reduces side effects.
1847 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates an empty Phi_in_stack (heap object + flexible array of
   node pointers) used to recycle speculatively-built Phi nodes. */
1849 new_Phi_in_stack(void) {
1852 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1854 res->stack = NEW_ARR_F (ir_node *, 0);
1858 } /* new_Phi_in_stack */
/* Frees the stack's array; freeing of the struct itself is not visible
   in this excerpt -- TODO confirm there is no leak of s. */
1861 free_Phi_in_stack(Phi_in_stack *s) {
1862 DEL_ARR_F(s->stack);
1864 } /* free_Phi_in_stack */
/* Pushes an unneeded Phi node onto the recycle stack: grow the array
   if the cursor is at its end, otherwise overwrite the slot in place. */
1867 free_to_Phi_in_stack(ir_node *phi) {
1868 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1869 current_ir_graph->Phi_in_stack->pos)
1870 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1872 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1874 (current_ir_graph->Phi_in_stack->pos)++;
1875 } /* free_to_Phi_in_stack */
/* Either pops a recycled Phi from the stack and re-initializes it, or
   allocates a fresh one when the stack is empty. */
1877 static INLINE ir_node *
1878 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1879 int arity, ir_node **in) {
1881 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1882 int pos = current_ir_graph->Phi_in_stack->pos;
1886 /* We need to allocate a new node */
/* NOTE(review): `db` is used here but no db parameter is visible in
   this function's signature -- verify against the full file. */
1887 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1888 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1890 /* reuse the old node and initialize it again. */
1893 assert (res->kind == k_ir_node);
1894 assert (res->op == op_Phi);
1898 assert (arity >= 0);
1899 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block edge; operands live at in[1..arity]. */
1900 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1902 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1904 (current_ir_graph->Phi_in_stack->pos)--;
1907 } /* alloc_or_pop_from_Phi_in_stack */
1908 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1911 * Creates a Phi node with a given, fixed array **in of predecessors.
1912 * If the Phi node is unnecessary, as the same value reaches the block
1913 * through all control flow paths, it is eliminated and the value
1914 * returned directly. This constructor is only intended for use in
1915 * the automatic Phi node generation triggered by get_value or mature.
1916 * The implementation is quite tricky and depends on the fact, that
1917 * the nodes are allocated on a stack:
1918 * The in array contains predecessors and NULLs. The NULLs appear,
1919 * if get_r_value_internal, that computed the predecessors, reached
1920 * the same block on two paths. In this case the same value reaches
1921 * this block on both paths, there is no definition in between. We need
1922 * not allocate a Phi where these path's merge, but we have to communicate
1923 * this fact to the caller. This happens by returning a pointer to the
1924 * node the caller _will_ allocate. (Yes, we predict the address. We can
1925 * do so because the nodes are allocated on the obstack.) The caller then
1926 * finds a pointer to itself and, when this routine is called again,
1927 * eliminates itself.
/* Fast-path Phi construction (USE_FAST_PHI_CONSTRUCTION variant).
   Relies on obstack allocation order to predict the address of a Phi
   that a caller will allocate -- see the large comment above.  The
   statement order here is load-bearing; do not reorder. */
1929 static INLINE ir_node *
1930 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1933 ir_node *res, *known;
1935 /* Allocate a new node on the obstack. This can return a node to
1936 which some of the pointers in the in-array already point.
1937 Attention: the constructor copies the in array, i.e., the later
1938 changes to the array in this routine do not affect the
1939 constructed node! If the in array contains NULLs, there will be
1940 missing predecessors in the returned node. Is this a possible
1941 internal state of the Phi node generation? */
1942 #if USE_EXPLICIT_PHI_IN_STACK
1943 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1945 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1946 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1949 /* The in-array can contain NULLs. These were returned by
1950 get_r_value_internal if it reached the same block/definition on a
1951 second path. The NULLs are replaced by the node itself to
1952 simplify the test in the next loop. */
1953 for (i = 0; i < ins; ++i) {
1958 /* This loop checks whether the Phi has more than one predecessor.
1959 If so, it is a real Phi node and we break the loop. Else the Phi
1960 node merges the same definition on several paths and therefore is
1962 for (i = 0; i < ins; ++i) {
1963 if (in[i] == res || in[i] == known)
1972 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The superfluous Phi is either recycled onto the explicit stack or
   freed straight off the obstack (after unlinking its edges). */
1974 #if USE_EXPLICIT_PHI_IN_STACK
1975 free_to_Phi_in_stack(res);
1977 edges_node_deleted(res, current_ir_graph);
1978 obstack_free(current_ir_graph->obst, res);
1982 res = optimize_node (res);
1983 IRN_VRFY_IRG(res, irg);
1986 /* return the pointer to the Phi node. This node might be deallocated! */
1988 } /* new_rd_Phi_in */
1991 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1994 * Allocates and returns this node. The routine called to allocate the
1995 * node might optimize it away and return a real value, or even a pointer
1996 * to a deallocated Phi node on top of the obstack!
1997 * This function is called with an in-array of proper size.
/* Fast-path phi_merge: collects the value at `pos` from every
   predecessor block into nin[], then builds (or folds away) a Phi.
   nin must have room for `ins` entries; `ins` equals the block arity. */
2000 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2002 ir_node *prevBlock, *res;
2005 /* This loop goes to all predecessor blocks of the block the Phi node is in
2006 and there finds the operands of the Phi node by calling
2007 get_r_value_internal. */
2008 for (i = 1; i <= ins; ++i) {
2009 assert (block->in[i]);
2010 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2012 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2015 /* After collecting all predecessors into the array nin a new Phi node
2016 with these predecessors is created. This constructor contains an
2017 optimization: If all predecessors of the Phi node are identical it
2018 returns the only operand instead of a new Phi node. If the value
2019 passes two different control flow edges without being defined, and
2020 this is the second path treated, a pointer to the node that will be
2021 allocated for the first path (recursion) is returned. We already
2022 know the address of this node, as it is the next node to be allocated
2023 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2024 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2026 /* Now we now the value for "pos" and can enter it in the array with
2027 all known local variables. Attention: this might be a pointer to
2028 a node, that later will be allocated!!! See new_rd_Phi_in().
2029 If this is called in mature, after some set_value() in the same block,
2030 the proper value must not be overwritten:
2032 get_value (makes Phi0, put's it into graph_arr)
2033 set_value (overwrites Phi0 in graph_arr)
2034 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record res if no later set_value() already filled the slot. */
2037 if (!block->attr.block.graph_arr[pos]) {
2038 block->attr.block.graph_arr[pos] = res;
2040 /* printf(" value already computed by %s\n",
2041 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2048 * This function returns the last definition of a variable. In case
2049 * this variable was last defined in a previous block, Phi nodes are
2050 * inserted. If the part of the firm graph containing the definition
2051 * is not yet constructed, a dummy Phi node is returned.
2054 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2057 /* There are 4 cases to treat.
2059 1. The block is not mature and we visit it the first time. We can not
2060 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2061 predecessors is returned. This node is added to the linked list (field
2062 "link") of the containing block to be completed when this block is
2063 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2066 2. The value is already known in this block, graph_arr[pos] is set and we
2067 visit the block the first time. We can return the value without
2068 creating any new nodes.
2070 3. The block is mature and we visit it the first time. A Phi node needs
2071 to be created (phi_merge). If the Phi is not needed, as all it's
2072 operands are the same value reaching the block through different
2073 paths, it's optimized away and the value itself is returned.
2075 4. The block is mature, and we visit it the second time. Now two
2076 subcases are possible:
2077 * The value was computed completely the last time we were here. This
2078 is the case if there is no loop. We can return the proper value.
2079 * The recursion that visited this node and set the flag did not
2080 return yet. We are computing a value in a loop and need to
2081 break the recursion without knowing the result yet.
2082 @@@ strange case. Straight forward we would create a Phi before
2083 starting the computation of it's predecessors. In this case we will
2084 find a Phi here in any case. The problem is that this implementation
2085 only creates a Phi after computing the predecessors, so that it is
2086 hard to compute self references of this Phi. @@@
2087 There is no simple check for the second subcase. Therefore we check
2088 for a second visit and treat all such cases as the second subcase.
2089 Anyways, the basic situation is the same: we reached a block
2090 on two paths without finding a definition of the value: No Phi
2091 nodes are needed on both paths.
2092 We return this information "Two paths, no Phi needed" by a very tricky
2093 implementation that relies on the fact that an obstack is a stack and
2094 will return a node with the same address on different allocations.
2095 Look also at phi_merge and new_rd_phi_in to understand this.
2096 @@@ Unfortunately this does not work, see testprogram
2097 three_cfpred_example.
/* Recursion breaker: uses the per-node visited counter against the
   graph's visited counter.  NULL here means "second visit, no Phi
   needed" -- callers (phi_merge) understand this sentinel. */
2101 /* case 4 -- already visited. */
2102 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2104 /* visited the first time */
2105 set_irn_visited(block, get_irg_visited(current_ir_graph));
2107 /* Get the local valid value */
2108 res = block->attr.block.graph_arr[pos];
2110 /* case 2 -- If the value is actually computed, return it. */
2111 if (res) return res;
2113 if (block->attr.block.matured) { /* case 3 */
2115 /* The Phi has the same amount of ins as the corresponding block. */
2116 int ins = get_irn_arity(block);
/* nin is alloca'd on the C stack; only valid within this call. */
2118 NEW_ARR_A (ir_node *, nin, ins);
2120 /* Phi merge collects the predecessors and then creates a node. */
2121 res = phi_merge (block, pos, mode, nin, ins);
2123 } else { /* case 1 */
2124 /* The block is not mature, we don't know how many in's are needed. A Phi
2125 with zero predecessors is created. Such a Phi node is called Phi0
2126 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2127 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2129 The Phi0 has to remember the pos of it's internal value. If the real
2130 Phi is computed, pos is used to update the array with the local
2131 variables. */
2133 res = new_rd_Phi0 (current_ir_graph, block, mode);
2134 res->attr.phi0_pos = pos;
/* Chain the Phi0 into the block's link list for mature_immBlock. */
2135 res->link = block->link;
2139 /* If we get here, the frontend missed a use-before-definition error */
2142 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2143 assert (mode->code >= irm_F && mode->code <= irm_P);
2144 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2145 tarval_mode_null[mode->code]);
2148 /* The local valid value is available now. */
2149 block->attr.block.graph_arr[pos] = res;
2152 } /* get_r_value_internal */
2157 it starts the recursion. This causes an Id at the entry of
2158 every block that has no definition of the value! **/
2160 #if USE_EXPLICIT_PHI_IN_STACK
2162 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2163 void free_Phi_in_stack(Phi_in_stack *s) { }
/* General (slow-path) Phi construction.  Unlike the fast variant it
   tolerates any number of predecessors and Bad inputs, and takes the
   candidate Phi0 (phi0) so self-references can be folded eagerly. */
2166 static INLINE ir_node *
2167 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2168 ir_node **in, int ins, ir_node *phi0)
2171 ir_node *res, *known;
2173 /* Allocate a new node on the obstack. The allocation copies the in
2175 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2176 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2178 /* This loop checks whether the Phi has more than one predecessor.
2179 If so, it is a real Phi node and we break the loop. Else the
2180 Phi node merges the same definition on several paths and therefore
2181 is not needed. Don't consider Bad nodes! */
2183 for (i=0; i < ins; ++i)
2187 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2189 /* Optimize self referencing Phis: We can't detect them yet properly, as
2190 they still refer to the Phi0 they will replace. So replace right now. */
2191 if (phi0 && in[i] == phi0) in[i] = res;
2193 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2201 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Discard the speculative Phi: unlink edges, pop it off the obstack,
   and return the single known predecessor instead. */
2204 edges_node_deleted(res, current_ir_graph);
2205 obstack_free (current_ir_graph->obst, res);
2206 if (is_Phi(known)) {
2207 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2208 order, an enclosing Phi know may get superfluous. */
2209 res = optimize_in_place_2(known);
2211 exchange(known, res);
2217 /* A undefined value, e.g., in unreachable code. */
2221 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2222 IRN_VRFY_IRG(res, irg);
2223 /* Memory Phis in endless loops must be kept alive.
2224 As we can't distinguish these easily we keep all of them alive. */
2225 if ((res->op == op_Phi) && (mode == mode_M))
2226 add_End_keepalive(get_irg_end(irg), res);
2230 } /* new_rd_Phi_in */
2233 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2235 #if PRECISE_EXC_CONTEXT
2237 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2240 * Construct a new frag_array for node n.
2241 * Copy the content from the current graph_arr of the corresponding block:
2242 * this is the current state.
2243 * Set ProjM(n) as current memory state.
2244 * Further the last entry in frag_arr of current block points to n. This
2245 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds the frag_arr of fragile op n: a snapshot of the current
   block's graph_arr with slot 0 replaced by n's exception-path memory
   Proj.  See the comment above for the last-frag-op chaining. */
2247 static INLINE ir_node ** new_frag_arr(ir_node *n)
2252 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2253 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2254 sizeof(ir_node *)*current_ir_graph->n_loc);
2256 /* turn off optimization before allocating Proj nodes, as res isn't
/* Optimization is disabled around the Proj creation; re-enabling via
   set_optimize(opt) is not visible in this excerpt -- TODO confirm. */
2258 opt = get_opt_optimize(); set_optimize(0);
2259 /* Here we rely on the fact that all frag ops have Memory as first result! */
2260 if (get_irn_op(n) == op_Call)
2261 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2262 else if (get_irn_op(n) == op_CopyB)
2263 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory Proj number; the
   asserts prove pn_Alloc_M is valid for every one of them. */
2265 assert((pn_Quot_M == pn_DivMod_M) &&
2266 (pn_Quot_M == pn_Div_M) &&
2267 (pn_Quot_M == pn_Mod_M) &&
2268 (pn_Quot_M == pn_Load_M) &&
2269 (pn_Quot_M == pn_Store_M) &&
2270 (pn_Quot_M == pn_Alloc_M) &&
2271 (pn_Quot_M == pn_Bound_M));
2272 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the block's most recent fragile op (slot n_loc-1). */
2276 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2278 } /* new_frag_arr */
2281 * Returns the frag_arr from a node.
2283 static INLINE ir_node **get_frag_arr(ir_node *n) {
2284 switch (get_irn_opcode(n)) {
2286 return n->attr.call.exc.frag_arr;
2288 return n->attr.alloc.exc.frag_arr;
2290 return n->attr.load.exc.frag_arr;
2292 return n->attr.store.exc.frag_arr;
2294 return n->attr.except.frag_arr;
2296 } /* get_frag_arr */
/* Records val at slot pos in frag_arr and in every frag_arr chained
   behind it (via the last-frag-op link in slot n_loc-1), without
   overwriting slots that are already set.  Two variants are visible
   here -- a recursive one and a bounded iterative one; presumably
   selected by a preprocessor conditional not shown -- TODO confirm. */
2299 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2301 if (!frag_arr[pos]) frag_arr[pos] = val;
2302 if (frag_arr[current_ir_graph->n_loc - 1]) {
2303 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2304 assert(arr != frag_arr && "Endless recursion detected");
2305 set_frag_value(arr, pos, val);
/* Iterative variant: walk the chain with a hard cap of 1000 hops to
   guard against a cyclic last-frag-op chain. */
2310 for (i = 0; i < 1000; ++i) {
2311 if (!frag_arr[pos]) {
2312 frag_arr[pos] = val;
2314 if (frag_arr[current_ir_graph->n_loc - 1]) {
2315 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2321 assert(0 && "potential endless recursion");
2323 } /* set_frag_value */
/* Like get_r_value_internal, but for the exception path of a fragile
   op cfOp: reads the value from cfOp's frag_arr snapshot and, when the
   block was written to after cfOp, builds the required Phi. */
2326 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2330 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2332 frag_arr = get_frag_arr(cfOp);
2333 res = frag_arr[pos];
2335 if (block->attr.block.graph_arr[pos]) {
2336 /* There was a set_value() after the cfOp and no get_value before that
2337 set_value(). We must build a Phi node now. */
2338 if (block->attr.block.matured) {
2339 int ins = get_irn_arity(block);
/* nin is alloca'd; valid only for this call. */
2341 NEW_ARR_A (ir_node *, nin, ins);
2342 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, chained into the
   block's link list for mature_immBlock. */
2344 res = new_rd_Phi0 (current_ir_graph, block, mode);
2345 res->attr.phi0_pos = pos;
2346 res->link = block->link;
2350 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2351 but this should be better: (remove comment if this works) */
2352 /* It's a Phi, we can write this into all graph_arrs with NULL */
2353 set_frag_value(block->attr.block.graph_arr, pos, res);
2355 res = get_r_value_internal(block, pos, mode);
2356 set_frag_value(block->attr.block.graph_arr, pos, res);
2360 } /* get_r_frag_value_internal */
2361 #endif /* PRECISE_EXC_CONTEXT */
2364 * Computes the predecessors for the real phi node, and then
2365 * allocates and returns this node. The routine called to allocate the
2366 * node might optimize it away and return a real value.
2367 * This function must be called with an in-array of proper size.
/* General (slow-path) phi_merge: fills nin[] with the value at `pos`
   from every predecessor block (handling Bad CF edges and, under
   PRECISE_EXC_CONTEXT, fragile-op exception paths), then builds or
   folds the Phi via new_rd_Phi_in.  May allocate a Phi0 placeholder
   first to break recursion. */
2370 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2372 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2375 /* If this block has no value at pos create a Phi0 and remember it
2376 in graph_arr to break recursions.
2377 Else we may not set graph_arr as there a later value is remembered. */
2379 if (!block->attr.block.graph_arr[pos]) {
2380 if (block == get_irg_start_block(current_ir_graph)) {
2381 /* Collapsing to Bad tarvals is no good idea.
2382 So we call a user-supplied routine here that deals with this case as
2383 appropriate for the given language. Sorrily the only help we can give
2384 here is the position.
2386 Even if all variables are defined before use, it can happen that
2387 we get to the start block, if a Cond has been replaced by a tuple
2388 (bad, jmp). In this case we call the function needlessly, eventually
2389 generating an non existent error.
2390 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2393 if (default_initialize_local_variable) {
2394 ir_node *rem = get_cur_block();
/* The callback runs with `block` as current block; `rem` presumably
   is restored afterwards in lines not visible here -- TODO confirm. */
2396 set_cur_block(block);
2397 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2401 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2402 /* We don't need to care about exception ops in the start block.
2403 There are none by definition. */
2404 return block->attr.block.graph_arr[pos];
2406 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2407 block->attr.block.graph_arr[pos] = phi0;
2408 #if PRECISE_EXC_CONTEXT
2409 if (get_opt_precise_exc_context()) {
2410 /* Set graph_arr for fragile ops. Also here we should break recursion.
2411 We could choose a cyclic path through an cfop. But the recursion would
2412 break at some point. */
2413 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2419 /* This loop goes to all predecessor blocks of the block the Phi node
2420 is in and there finds the operands of the Phi node by calling
2421 get_r_value_internal. */
2422 for (i = 1; i <= ins; ++i) {
2423 prevCfOp = skip_Proj(block->in[i]);
2425 if (is_Bad(prevCfOp)) {
2426 /* In case a Cond has been optimized we would get right to the start block
2427 with an invalid definition. */
2428 nin[i-1] = new_Bad();
2431 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2433 if (!is_Bad(prevBlock)) {
2434 #if PRECISE_EXC_CONTEXT
/* On an exception edge from a fragile op the value comes from the
   op's frag_arr snapshot, not from the block's graph_arr. */
2435 if (get_opt_precise_exc_context() &&
2436 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2437 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2438 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2441 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2443 nin[i-1] = new_Bad();
2447 /* We want to pass the Phi0 node to the constructor: this finds additional
2448 optimization possibilities.
2449 The Phi0 node either is allocated in this function, or it comes from
2450 a former call to get_r_value_internal. In this case we may not yet
2451 exchange phi0, as this is done in mature_immBlock. */
2453 phi0_all = block->attr.block.graph_arr[pos];
/* Only treat graph_arr[pos] as a Phi0 of THIS block (arity 0); any
   other node means a later set_value replaced it. */
2454 if (!((get_irn_op(phi0_all) == op_Phi) &&
2455 (get_irn_arity(phi0_all) == 0) &&
2456 (get_nodes_block(phi0_all) == block)))
2462 /* After collecting all predecessors into the array nin a new Phi node
2463 with these predecessors is created. This constructor contains an
2464 optimization: If all predecessors of the Phi node are identical it
2465 returns the only operand instead of a new Phi node. */
2466 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2468 /* In case we allocated a Phi0 node at the beginning of this procedure,
2469 we need to exchange this Phi0 with the real Phi. */
2471 exchange(phi0, res);
2472 block->attr.block.graph_arr[pos] = res;
2473 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2474 only an optimization. */
2481 * This function returns the last definition of a variable. In case
2482 * this variable was last defined in a previous block, Phi nodes are
2483 * inserted. If the part of the firm graph containing the definition
2484 * is not yet constructed, a dummy Phi node is returned.
2487 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2490 /* There are 4 cases to treat.
2492 1. The block is not mature and we visit it the first time. We can not
2493 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2494 predecessors is returned. This node is added to the linked list (field
2495 "link") of the containing block to be completed when this block is
2496 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2499 2. The value is already known in this block, graph_arr[pos] is set and we
2500 visit the block the first time. We can return the value without
2501 creating any new nodes.
2503 3. The block is mature and we visit it the first time. A Phi node needs
2504 to be created (phi_merge). If the Phi is not needed, as all it's
2505 operands are the same value reaching the block through different
2506 paths, it's optimized away and the value itself is returned.
2508 4. The block is mature, and we visit it the second time. Now two
2509 subcases are possible:
2510 * The value was computed completely the last time we were here. This
2511 is the case if there is no loop. We can return the proper value.
2512 * The recursion that visited this node and set the flag did not
2513 return yet. We are computing a value in a loop and need to
2514 break the recursion. This case only happens if we visited
2515 the same block with phi_merge before, which inserted a Phi0.
2516 So we return the Phi0.
2519 /* case 4 -- already visited. */
2520 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2521 /* As phi_merge allocates a Phi0 this value is always defined. Here
2522 is the critical difference of the two algorithms. */
2523 assert(block->attr.block.graph_arr[pos]);
2524 return block->attr.block.graph_arr[pos];
2527 /* visited the first time */
2528 set_irn_visited(block, get_irg_visited(current_ir_graph));
2530 /* Get the local valid value */
2531 res = block->attr.block.graph_arr[pos];
2533 /* case 2 -- If the value is actually computed, return it. */
2534 if (res) { return res; };
2536 if (block->attr.block.matured) { /* case 3 */
2538 /* The Phi has the same amount of ins as the corresponding block. */
2539 int ins = get_irn_arity(block);
2541 NEW_ARR_A (ir_node *, nin, ins);
2543 /* Phi merge collects the predecessors and then creates a node. */
2544 res = phi_merge (block, pos, mode, nin, ins);
2546 } else { /* case 1 */
2547 /* The block is not mature, we don't know how many in's are needed. A Phi
2548 with zero predecessors is created. Such a Phi node is called Phi0
2549 node. The Phi0 is then added to the list of Phi0 nodes in this block
2550 to be matured by mature_immBlock later.
2551 The Phi0 has to remember the pos of it's internal value. If the real
2552 Phi is computed, pos is used to update the array with the local
2554 res = new_rd_Phi0 (current_ir_graph, block, mode);
2555 res->attr.phi0_pos = pos;
2556 res->link = block->link;
2560 /* If we get here, the frontend missed a use-before-definition error */
2563 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2564 assert (mode->code >= irm_F && mode->code <= irm_P);
2565 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2566 get_mode_null(mode));
2569 /* The local valid value is available now. */
2570 block->attr.block.graph_arr[pos] = res;
2573 } /* get_r_value_internal */
2575 #endif /* USE_FAST_PHI_CONSTRUCTION */
2577 /* ************************************************************************** */
2580 * Finalize a Block node, when all control flows are known.
2581 * Acceptable parameters are only Block nodes.
2584 mature_immBlock(ir_node *block)
2590 assert (get_irn_opcode(block) == iro_Block);
2591 /* @@@ should be commented in
2592 assert (!get_Block_matured(block) && "Block already matured"); */
2594 if (!get_Block_matured(block)) {
2595 ins = ARR_LEN (block->in)-1;
2596 /* Fix block parameters */
2597 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2599 /* An array for building the Phi nodes. */
2600 NEW_ARR_A (ir_node *, nin, ins);
2602 /* Traverse a chain of Phi nodes attached to this block and mature
2604 for (n = block->link; n; n=next) {
2605 inc_irg_visited(current_ir_graph);
2607 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2610 block->attr.block.matured = 1;
2612 /* Now, as the block is a finished firm node, we can optimize it.
2613 Since other nodes have been allocated since the block was created
2614 we can not free the node on the obstack. Therefore we have to call
2616 Unfortunately the optimization does not change a lot, as all allocated
2617 nodes refer to the unoptimized node.
2618 We can call _2, as global cse has no effect on blocks. */
2619 block = optimize_in_place_2(block);
2620 IRN_VRFY_IRG(block, current_ir_graph);
2622 } /* mature_immBlock */
2625 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2626 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2630 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2631 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2635 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2636 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2637 } /* new_d_Const_long */
2640 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2641 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2642 } /* new_d_Const_type */
2646 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2647 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2651 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2652 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2656 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2658 assert(arg->op == op_Cond);
2659 arg->attr.cond.kind = fragmentary;
2660 arg->attr.cond.default_proj = max_proj;
2661 res = new_Proj (arg, mode_X, max_proj);
2663 } /* new_d_defaultProj */
2666 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode) {
2667 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2671 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2672 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2673 } /* new_d_strictConv */
2676 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2677 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2681 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2682 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2691 * Allocate the frag array.
2693 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2694 if (get_opt_precise_exc_context()) {
2695 if ((current_ir_graph->phase_state == phase_building) &&
2696 (get_irn_op(res) == op) && /* Could be optimized away. */
2697 !*frag_store) /* Could be a cse where the arr is already set. */ {
2698 *frag_store = new_frag_arr(res);
2701 } /* allocate_frag_arr */
2704 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2706 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2707 res->attr.except.pin_state = op_pin_state_pinned;
2708 #if PRECISE_EXC_CONTEXT
2709 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2716 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2718 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2719 res->attr.except.pin_state = op_pin_state_pinned;
2720 #if PRECISE_EXC_CONTEXT
2721 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2725 } /* new_d_DivMod */
2728 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2731 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2732 res->attr.except.pin_state = op_pin_state_pinned;
2733 #if PRECISE_EXC_CONTEXT
2734 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2741 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2743 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2744 res->attr.except.pin_state = op_pin_state_pinned;
2745 #if PRECISE_EXC_CONTEXT
2746 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2765 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2766 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2770 new_d_Jmp(dbg_info *db) {
2771 return new_bd_Jmp(db, current_ir_graph->current_block);
2775 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2776 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2780 new_d_Cond(dbg_info *db, ir_node *c) {
2781 return new_bd_Cond(db, current_ir_graph->current_block, c);
2785 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2789 res = new_bd_Call(db, current_ir_graph->current_block,
2790 store, callee, arity, in, tp);
2791 #if PRECISE_EXC_CONTEXT
2792 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2799 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2800 return new_bd_Return(db, current_ir_graph->current_block,
2802 } /* new_d_Return */
2805 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2807 res = new_bd_Load(db, current_ir_graph->current_block,
2809 #if PRECISE_EXC_CONTEXT
2810 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2817 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2819 res = new_bd_Store(db, current_ir_graph->current_block,
2821 #if PRECISE_EXC_CONTEXT
2822 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2829 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2833 res = new_bd_Alloc(db, current_ir_graph->current_block,
2834 store, size, alloc_type, where);
2835 #if PRECISE_EXC_CONTEXT
2836 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2843 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2844 ir_node *size, ir_type *free_type, where_alloc where)
2846 return new_bd_Free(db, current_ir_graph->current_block,
2847 store, ptr, size, free_type, where);
2851 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2852 /* GL: objptr was called frame before. Frame was a bad choice for the name
2853 as the operand could as well be a pointer to a dynamic object. */
2855 return new_bd_Sel(db, current_ir_graph->current_block,
2856 store, objptr, 0, NULL, ent);
2857 } /* new_d_simpleSel */
2860 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2862 return new_bd_Sel(db, current_ir_graph->current_block,
2863 store, objptr, n_index, index, sel);
2867 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2869 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2871 } /* new_d_SymConst_type */
2874 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind)
2876 return new_bd_SymConst(db, get_irg_start_block(current_ir_graph),
2878 } /* new_d_SymConst */
2881 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2882 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2888 return _new_d_Bad();
2892 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2893 return new_bd_Confirm(db, current_ir_graph->current_block,
2895 } /* new_d_Confirm */
2898 new_d_Unknown(ir_mode *m) {
2899 return new_bd_Unknown(m);
2900 } /* new_d_Unknown */
2903 new_d_CallBegin(dbg_info *db, ir_node *call) {
2904 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2905 } /* new_d_CallBegin */
2908 new_d_EndReg(dbg_info *db) {
2909 return new_bd_EndReg(db, current_ir_graph->current_block);
2910 } /* new_d_EndReg */
2913 new_d_EndExcept(dbg_info *db) {
2914 return new_bd_EndExcept(db, current_ir_graph->current_block);
2915 } /* new_d_EndExcept */
2918 new_d_Break(dbg_info *db) {
2919 return new_bd_Break(db, current_ir_graph->current_block);
2923 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2924 return new_bd_Filter (db, current_ir_graph->current_block,
2926 } /* new_d_Filter */
2929 (new_d_NoMem)(void) {
2930 return _new_d_NoMem();
2934 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2935 ir_node *ir_true, ir_mode *mode) {
2936 return new_bd_Mux(db, current_ir_graph->current_block,
2937 sel, ir_false, ir_true, mode);
2941 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2942 return new_bd_Psi(db, current_ir_graph->current_block,
2943 arity, conds, vals, mode);
2946 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2947 ir_node *dst, ir_node *src, ir_type *data_type) {
2949 res = new_bd_CopyB(db, current_ir_graph->current_block,
2950 store, dst, src, data_type);
2951 #if PRECISE_EXC_CONTEXT
2952 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2958 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2959 return new_bd_InstOf(db, current_ir_graph->current_block,
2960 store, objptr, type);
2961 } /* new_d_InstOf */
2964 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2965 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2968 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2969 ir_node *idx, ir_node *lower, ir_node *upper) {
2971 res = new_bd_Bound(db, current_ir_graph->current_block,
2972 store, idx, lower, upper);
2973 #if PRECISE_EXC_CONTEXT
2974 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2980 new_d_Pin(dbg_info *db, ir_node *node) {
2981 return new_bd_Pin(db, current_ir_graph->current_block, node);
2984 /* ********************************************************************* */
2985 /* Comfortable interface with automatic Phi node construction. */
2986 /* (Uses also constructors of the ?? interface, except new_Block.) */
2987 /* ********************************************************************* */
2989 /* Block construction */
2990 /* immature Block without predecessors */
2991 ir_node *new_d_immBlock(dbg_info *db) {
2994 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2995 /* creates a new dynamic in-array as length of in is -1 */
2996 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2997 current_ir_graph->current_block = res;
2998 res->attr.block.matured = 0;
2999 res->attr.block.dead = 0;
3000 /* res->attr.block.exc = exc_normal; */
3001 /* res->attr.block.handler_entry = 0; */
3002 res->attr.block.irg = current_ir_graph;
3003 res->attr.block.backedge = NULL;
3004 res->attr.block.in_cg = NULL;
3005 res->attr.block.cg_backedge = NULL;
3006 set_Block_block_visited(res, 0);
3008 /* Create and initialize array for Phi-node construction. */
3009 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3010 current_ir_graph->n_loc);
3011 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3013 /* Immature block may not be optimized! */
3014 IRN_VRFY_IRG(res, current_ir_graph);
3017 } /* new_d_immBlock */
3020 new_immBlock(void) {
3021 return new_d_immBlock(NULL);
3022 } /* new_immBlock */
3024 /* add an edge to a jmp/control flow node */
3026 add_immBlock_pred(ir_node *block, ir_node *jmp)
3028 if (block->attr.block.matured) {
3029 assert(0 && "Error: Block already matured!\n");
3032 assert(jmp != NULL);
3033 ARR_APP1(ir_node *, block->in, jmp);
3035 } /* add_immBlock_pred */
3037 /* changing the current block */
3039 set_cur_block(ir_node *target) {
3040 current_ir_graph->current_block = target;
3041 } /* set_cur_block */
3043 /* ************************ */
3044 /* parameter administration */
3046 /* get a value from the parameter array from the current block by its index */
3048 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3049 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3050 inc_irg_visited(current_ir_graph);
3052 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3055 /* get a value from the parameter array from the current block by its index */
3057 get_value(int pos, ir_mode *mode) {
3058 return get_d_value(NULL, pos, mode);
3061 /* set a value at position pos in the parameter array from the current block */
3063 set_value(int pos, ir_node *value) {
3064 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3065 assert(pos+1 < current_ir_graph->n_loc);
3066 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3070 find_value(ir_node *value) {
3072 ir_node *bl = current_ir_graph->current_block;
3074 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3075 if (bl->attr.block.graph_arr[i] == value)
3080 /* get the current store */
3084 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3085 /* GL: one could call get_value instead */
3086 inc_irg_visited(current_ir_graph);
3087 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3090 /* set the current store: handles automatic Sync construction for Load nodes */
3092 set_store(ir_node *store)
3094 ir_node *load, *pload, *pred, *in[2];
3096 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3098 if (get_opt_auto_create_sync()) {
3099 /* handle non-volatile Load nodes by automatically creating Sync's */
3100 load = skip_Proj(store);
3101 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3102 pred = get_Load_mem(load);
3104 if (is_Sync(pred)) {
3105 /* a Load after a Sync: move it up */
3106 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3108 set_Load_mem(load, get_memop_mem(mem));
3109 add_Sync_pred(pred, store);
3113 pload = skip_Proj(pred);
3114 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3115 /* a Load after a Load: create a new Sync */
3116 set_Load_mem(load, get_Load_mem(pload));
3120 store = new_Sync(2, in);
3125 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3129 keep_alive(ir_node *ka) {
3130 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3133 /* --- Useful access routines --- */
3134 /* Returns the current block of the current graph. To set the current
3135 block use set_cur_block. */
3136 ir_node *get_cur_block(void) {
3137 return get_irg_current_block(current_ir_graph);
3138 } /* get_cur_block */
3140 /* Returns the frame type of the current graph */
3141 ir_type *get_cur_frame_type(void) {
3142 return get_irg_frame_type(current_ir_graph);
3143 } /* get_cur_frame_type */
3146 /* ********************************************************************* */
3149 /* call once for each run of the library */
3151 init_cons(uninitialized_local_variable_func_t *func) {
3152 default_initialize_local_variable = func;
3156 irp_finalize_cons(void) {
3158 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3159 irg_finalize_cons(get_irp_irg(i));
3161 irp->phase_state = phase_high;
3162 } /* irp_finalize_cons */
3165 ir_node *new_Block(int arity, ir_node **in) {
3166 return new_d_Block(NULL, arity, in);
3168 ir_node *new_Start (void) {
3169 return new_d_Start(NULL);
3171 ir_node *new_End (void) {
3172 return new_d_End(NULL);
3174 ir_node *new_Jmp (void) {
3175 return new_d_Jmp(NULL);
3177 ir_node *new_IJmp (ir_node *tgt) {
3178 return new_d_IJmp(NULL, tgt);
3180 ir_node *new_Cond (ir_node *c) {
3181 return new_d_Cond(NULL, c);
3183 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3184 return new_d_Return(NULL, store, arity, in);
3186 ir_node *new_Const (ir_mode *mode, tarval *con) {
3187 return new_d_Const(NULL, mode, con);
3190 ir_node *new_Const_long(ir_mode *mode, long value)
3192 return new_d_Const_long(NULL, mode, value);
3195 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3196 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3199 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3200 return new_d_SymConst(NULL, value, kind);
3202 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3203 return new_d_simpleSel(NULL, store, objptr, ent);
3205 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3207 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3209 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3211 return new_d_Call(NULL, store, callee, arity, in, tp);
3213 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3214 return new_d_Add(NULL, op1, op2, mode);
3216 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3217 return new_d_Sub(NULL, op1, op2, mode);
3219 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3220 return new_d_Minus(NULL, op, mode);
3222 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3223 return new_d_Mul(NULL, op1, op2, mode);
3225 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3226 return new_d_Quot(NULL, memop, op1, op2);
3228 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3229 return new_d_DivMod(NULL, memop, op1, op2);
3231 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3232 return new_d_Div(NULL, memop, op1, op2);
3234 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3235 return new_d_Mod(NULL, memop, op1, op2);
3237 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3238 return new_d_Abs(NULL, op, mode);
3240 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3241 return new_d_And(NULL, op1, op2, mode);
3243 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3244 return new_d_Or(NULL, op1, op2, mode);
3246 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3247 return new_d_Eor(NULL, op1, op2, mode);
3249 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3250 return new_d_Not(NULL, op, mode);
3252 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3253 return new_d_Shl(NULL, op, k, mode);
3255 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3256 return new_d_Shr(NULL, op, k, mode);
3258 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3259 return new_d_Shrs(NULL, op, k, mode);
3261 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3262 return new_d_Rot(NULL, op, k, mode);
3264 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3265 return new_d_Carry(NULL, op1, op2, mode);
3267 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3268 return new_d_Borrow(NULL, op1, op2, mode);
3270 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3271 return new_d_Cmp(NULL, op1, op2);
3273 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3274 return new_d_Conv(NULL, op, mode);
3276 ir_node *new_strictConv (ir_node *op, ir_mode *mode) {
3277 return new_d_strictConv(NULL, op, mode);
3279 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3280 return new_d_Cast(NULL, op, to_tp);
3282 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3283 return new_d_Phi(NULL, arity, in, mode);
3285 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3286 return new_d_Load(NULL, store, addr, mode);
3288 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3289 return new_d_Store(NULL, store, addr, val);
3291 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3292 where_alloc where) {
3293 return new_d_Alloc(NULL, store, size, alloc_type, where);
3295 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3296 ir_type *free_type, where_alloc where) {
3297 return new_d_Free(NULL, store, ptr, size, free_type, where);
3299 ir_node *new_Sync (int arity, ir_node *in[]) {
3300 return new_d_Sync(NULL, arity, in);
3302 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3303 return new_d_Proj(NULL, arg, mode, proj);
3305 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3306 return new_d_defaultProj(NULL, arg, max_proj);
3308 ir_node *new_Tuple (int arity, ir_node **in) {
3309 return new_d_Tuple(NULL, arity, in);
3311 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3312 return new_d_Id(NULL, val, mode);
3314 ir_node *new_Bad (void) {
3317 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3318 return new_d_Confirm (NULL, val, bound, cmp);
3320 ir_node *new_Unknown(ir_mode *m) {
3321 return new_d_Unknown(m);
3323 ir_node *new_CallBegin (ir_node *callee) {
3324 return new_d_CallBegin(NULL, callee);
3326 ir_node *new_EndReg (void) {
3327 return new_d_EndReg(NULL);
3329 ir_node *new_EndExcept (void) {
3330 return new_d_EndExcept(NULL);
3332 ir_node *new_Break (void) {
3333 return new_d_Break(NULL);
3335 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3336 return new_d_Filter(NULL, arg, mode, proj);
3338 ir_node *new_NoMem (void) {
3339 return new_d_NoMem();
3341 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3342 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3344 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3345 return new_d_Psi(NULL, arity, conds, vals, mode);
3347 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3348 return new_d_CopyB(NULL, store, dst, src, data_type);
3350 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3351 return new_d_InstOf (NULL, store, objptr, ent);
3353 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3354 return new_d_Raise(NULL, store, obj);
3356 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3357 return new_d_Bound(NULL, store, idx, lower, upper);
3359 ir_node *new_Pin(ir_node *node) {
3360 return new_d_Pin(NULL, node);