3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
127 /* Don't assert that block matured: the use of this constructor is strongly
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146 As we can't distinguish these easily we keep all of them alive. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp); /* Call method because of complex assertion. */
161 res = optimize_node (res);
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
304 new_bd_Minus (dbg_info *db, ir_node *block,
305 ir_node *op, ir_mode *mode)
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
311 res = optimize_node(res);
312 IRN_VRFY_IRG(res, irg);
317 new_bd_Mul (dbg_info *db, ir_node *block,
318 ir_node *op1, ir_node *op2, ir_mode *mode)
322 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
327 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
333 new_bd_Quot (dbg_info *db, ir_node *block,
334 ir_node *memop, ir_node *op1, ir_node *op2)
338 ir_graph *irg = current_ir_graph;
343 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
344 res = optimize_node(res);
345 IRN_VRFY_IRG(res, irg);
350 new_bd_DivMod (dbg_info *db, ir_node *block,
351 ir_node *memop, ir_node *op1, ir_node *op2)
355 ir_graph *irg = current_ir_graph;
360 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Div (dbg_info *db, ir_node *block,
368 ir_node *memop, ir_node *op1, ir_node *op2)
372 ir_graph *irg = current_ir_graph;
377 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
378 res = optimize_node(res);
379 IRN_VRFY_IRG(res, irg);
384 new_bd_Mod (dbg_info *db, ir_node *block,
385 ir_node *memop, ir_node *op1, ir_node *op2)
389 ir_graph *irg = current_ir_graph;
394 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
395 res = optimize_node(res);
396 IRN_VRFY_IRG(res, irg);
401 new_bd_And (dbg_info *db, ir_node *block,
402 ir_node *op1, ir_node *op2, ir_mode *mode)
406 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_Or (dbg_info *db, ir_node *block,
418 ir_node *op1, ir_node *op2, ir_mode *mode)
422 ir_graph *irg = current_ir_graph;
426 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
427 res = optimize_node(res);
428 IRN_VRFY_IRG(res, irg);
433 new_bd_Eor (dbg_info *db, ir_node *block,
434 ir_node *op1, ir_node *op2, ir_mode *mode)
438 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
443 res = optimize_node (res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_Not (dbg_info *db, ir_node *block,
450 ir_node *op, ir_mode *mode)
453 ir_graph *irg = current_ir_graph;
455 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_bd_Shl (dbg_info *db, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
467 ir_graph *irg = current_ir_graph;
471 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Shr (dbg_info *db, ir_node *block,
479 ir_node *op, ir_node *k, ir_mode *mode)
483 ir_graph *irg = current_ir_graph;
487 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_bd_Shrs (dbg_info *db, ir_node *block,
495 ir_node *op, ir_node *k, ir_mode *mode)
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
504 res = optimize_node(res);
505 IRN_VRFY_IRG(res, irg);
510 new_bd_Rot (dbg_info *db, ir_node *block,
511 ir_node *op, ir_node *k, ir_mode *mode)
515 ir_graph *irg = current_ir_graph;
519 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
520 res = optimize_node(res);
521 IRN_VRFY_IRG(res, irg);
526 new_bd_Abs (dbg_info *db, ir_node *block,
527 ir_node *op, ir_mode *mode)
530 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
533 res = optimize_node (res);
534 IRN_VRFY_IRG(res, irg);
539 new_bd_Cmp (dbg_info *db, ir_node *block,
540 ir_node *op1, ir_node *op2)
544 ir_graph *irg = current_ir_graph;
549 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
556 new_bd_Jmp (dbg_info *db, ir_node *block)
559 ir_graph *irg = current_ir_graph;
561 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
562 res = optimize_node (res);
563 IRN_VRFY_IRG (res, irg);
568 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
571 ir_graph *irg = current_ir_graph;
573 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
574 res = optimize_node (res);
575 IRN_VRFY_IRG (res, irg);
577 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
583 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
586 ir_graph *irg = current_ir_graph;
588 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
589 res->attr.c.kind = dense;
590 res->attr.c.default_proj = 0;
591 res->attr.c.pred = COND_JMP_PRED_NONE;
592 res = optimize_node (res);
593 IRN_VRFY_IRG(res, irg);
598 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
599 ir_node *callee, int arity, ir_node **in, ir_type *tp)
604 ir_graph *irg = current_ir_graph;
607 NEW_ARR_A(ir_node *, r_in, r_arity);
610 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
612 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
614 assert((get_unknown_type() == tp) || is_Method_type(tp));
615 set_Call_type(res, tp);
616 res->attr.call.exc.pin_state = op_pin_state_pinned;
617 res->attr.call.callee_arr = NULL;
618 res = optimize_node(res);
619 IRN_VRFY_IRG(res, irg);
624 new_bd_Return (dbg_info *db, ir_node *block,
625 ir_node *store, int arity, ir_node **in)
630 ir_graph *irg = current_ir_graph;
633 NEW_ARR_A (ir_node *, r_in, r_arity);
635 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
636 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_bd_Load (dbg_info *db, ir_node *block,
644 ir_node *store, ir_node *adr, ir_mode *mode)
648 ir_graph *irg = current_ir_graph;
652 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
653 res->attr.load.exc.pin_state = op_pin_state_pinned;
654 res->attr.load.load_mode = mode;
655 res->attr.load.volatility = volatility_non_volatile;
656 res = optimize_node(res);
657 IRN_VRFY_IRG(res, irg);
662 new_bd_Store (dbg_info *db, ir_node *block,
663 ir_node *store, ir_node *adr, ir_node *val)
667 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
673 res->attr.store.exc.pin_state = op_pin_state_pinned;
674 res->attr.store.volatility = volatility_non_volatile;
675 res = optimize_node(res);
676 IRN_VRFY_IRG(res, irg);
681 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
682 ir_node *size, ir_type *alloc_type, where_alloc where)
686 ir_graph *irg = current_ir_graph;
690 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
691 res->attr.a.exc.pin_state = op_pin_state_pinned;
692 res->attr.a.where = where;
693 res->attr.a.type = alloc_type;
694 res = optimize_node(res);
695 IRN_VRFY_IRG(res, irg);
700 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
701 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
705 ir_graph *irg = current_ir_graph;
710 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
711 res->attr.f.where = where;
712 res->attr.f.type = free_type;
713 res = optimize_node(res);
714 IRN_VRFY_IRG(res, irg);
719 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
720 int arity, ir_node **in, entity *ent)
725 ir_graph *irg = current_ir_graph;
727 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
730 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
733 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
735 * FIXM: Sel's can select functions which should be of mode mode_P_code.
737 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
738 res->attr.s.ent = ent;
739 res = optimize_node(res);
740 IRN_VRFY_IRG(res, irg);
745 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
746 symconst_kind symkind, ir_type *tp) {
749 ir_graph *irg = current_ir_graph;
751 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
752 mode = mode_P_data; /* FIXME: can be mode_P_code */
756 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
758 res->attr.i.num = symkind;
759 res->attr.i.sym = value;
762 res = optimize_node(res);
763 IRN_VRFY_IRG(res, irg);
768 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
769 symconst_kind symkind)
771 ir_graph *irg = current_ir_graph;
773 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
778 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
781 ir_graph *irg = current_ir_graph;
783 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
784 res = optimize_node(res);
785 IRN_VRFY_IRG(res, irg);
790 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
792 ir_node *in[2], *res;
793 ir_graph *irg = current_ir_graph;
797 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
798 res->attr.confirm_cmp = cmp;
799 res = optimize_node (res);
800 IRN_VRFY_IRG(res, irg);
804 /* this function is often called with current_ir_graph unset */
806 new_bd_Unknown (ir_mode *m)
809 ir_graph *irg = current_ir_graph;
811 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
812 res = optimize_node(res);
817 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
821 ir_graph *irg = current_ir_graph;
823 in[0] = get_Call_ptr(call);
824 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
825 /* res->attr.callbegin.irg = irg; */
826 res->attr.callbegin.call = call;
827 res = optimize_node(res);
828 IRN_VRFY_IRG(res, irg);
833 new_bd_EndReg (dbg_info *db, ir_node *block)
836 ir_graph *irg = current_ir_graph;
838 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
840 IRN_VRFY_IRG(res, irg);
845 new_bd_EndExcept (dbg_info *db, ir_node *block)
848 ir_graph *irg = current_ir_graph;
850 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
851 irg->end_except = res;
852 IRN_VRFY_IRG (res, irg);
857 new_bd_Break (dbg_info *db, ir_node *block)
860 ir_graph *irg = current_ir_graph;
862 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
863 res = optimize_node(res);
864 IRN_VRFY_IRG(res, irg);
869 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
873 ir_graph *irg = current_ir_graph;
875 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
876 res->attr.filter.proj = proj;
877 res->attr.filter.in_cg = NULL;
878 res->attr.filter.backedge = NULL;
881 assert(get_Proj_pred(res));
882 assert(get_nodes_block(get_Proj_pred(res)));
884 res = optimize_node(res);
885 IRN_VRFY_IRG(res, irg);
890 new_bd_Mux (dbg_info *db, ir_node *block,
891 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
895 ir_graph *irg = current_ir_graph;
901 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
904 res = optimize_node(res);
905 IRN_VRFY_IRG(res, irg);
910 new_bd_CopyB (dbg_info *db, ir_node *block,
911 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
915 ir_graph *irg = current_ir_graph;
921 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
923 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
924 res->attr.copyb.data_type = data_type;
925 res = optimize_node(res);
926 IRN_VRFY_IRG(res, irg);
931 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
932 ir_node *objptr, ir_type *type)
936 ir_graph *irg = current_ir_graph;
940 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
941 res->attr.io.type = type;
942 res = optimize_node(res);
943 IRN_VRFY_IRG(res, irg);
948 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
952 ir_graph *irg = current_ir_graph;
956 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
957 res = optimize_node(res);
958 IRN_VRFY_IRG(res, irg);
963 new_bd_Bound (dbg_info *db, ir_node *block,
964 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
968 ir_graph *irg = current_ir_graph;
974 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
975 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
976 res = optimize_node(res);
977 IRN_VRFY_IRG(res, irg);
981 /* --------------------------------------------- */
982 /* private interfaces, for professional use only */
983 /* --------------------------------------------- */
985 /* Constructs a Block with a fixed number of predecessors.
986 Does not set current_block. Can not be used with automatic
987 Phi node construction. */
989 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
991 ir_graph *rem = current_ir_graph;
994 current_ir_graph = irg;
995 res = new_bd_Block (db, arity, in);
996 current_ir_graph = rem;
1002 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
1004 ir_graph *rem = current_ir_graph;
1007 current_ir_graph = irg;
1008 res = new_bd_Start (db, block);
1009 current_ir_graph = rem;
1015 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
1018 ir_graph *rem = current_ir_graph;
1020 current_ir_graph = rem;
1021 res = new_bd_End (db, block);
1022 current_ir_graph = rem;
1027 /* Creates a Phi node with all predecessors. Calling this constructor
1028 is only allowed if the corresponding block is mature. */
1030 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
1033 ir_graph *rem = current_ir_graph;
1035 current_ir_graph = irg;
1036 res = new_bd_Phi (db, block,arity, in, mode);
1037 current_ir_graph = rem;
1043 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
1046 ir_graph *rem = current_ir_graph;
1048 current_ir_graph = irg;
1049 res = new_bd_Const_type (db, block, mode, con, tp);
1050 current_ir_graph = rem;
1056 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1063 current_ir_graph = rem;
1069 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1071 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1075 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1078 ir_graph *rem = current_ir_graph;
1080 current_ir_graph = irg;
1081 res = new_bd_Id(db, block, val, mode);
1082 current_ir_graph = rem;
1088 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_Proj(db, block, arg, mode, proj);
1096 current_ir_graph = rem;
1102 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1106 ir_graph *rem = current_ir_graph;
1108 current_ir_graph = irg;
1109 res = new_bd_defaultProj(db, block, arg, max_proj);
1110 current_ir_graph = rem;
1116 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Conv(db, block, op, mode);
1123 current_ir_graph = rem;
1129 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1132 ir_graph *rem = current_ir_graph;
1134 current_ir_graph = irg;
1135 res = new_bd_Cast(db, block, op, to_tp);
1136 current_ir_graph = rem;
1142 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Tuple(db, block, arity, in);
1149 current_ir_graph = rem;
1155 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1156 ir_node *op1, ir_node *op2, ir_mode *mode)
1159 ir_graph *rem = current_ir_graph;
1161 current_ir_graph = irg;
1162 res = new_bd_Add(db, block, op1, op2, mode);
1163 current_ir_graph = rem;
1169 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1170 ir_node *op1, ir_node *op2, ir_mode *mode)
1173 ir_graph *rem = current_ir_graph;
1175 current_ir_graph = irg;
1176 res = new_bd_Sub(db, block, op1, op2, mode);
1177 current_ir_graph = rem;
1183 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1184 ir_node *op, ir_mode *mode)
1187 ir_graph *rem = current_ir_graph;
1189 current_ir_graph = irg;
1190 res = new_bd_Minus(db, block, op, mode);
1191 current_ir_graph = rem;
1197 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1198 ir_node *op1, ir_node *op2, ir_mode *mode)
1201 ir_graph *rem = current_ir_graph;
1203 current_ir_graph = irg;
1204 res = new_bd_Mul(db, block, op1, op2, mode);
1205 current_ir_graph = rem;
1211 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1212 ir_node *memop, ir_node *op1, ir_node *op2)
1215 ir_graph *rem = current_ir_graph;
1217 current_ir_graph = irg;
1218 res = new_bd_Quot(db, block, memop, op1, op2);
1219 current_ir_graph = rem;
1225 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1226 ir_node *memop, ir_node *op1, ir_node *op2)
1229 ir_graph *rem = current_ir_graph;
1231 current_ir_graph = irg;
1232 res = new_bd_DivMod(db, block, memop, op1, op2);
1233 current_ir_graph = rem;
1239 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1240 ir_node *memop, ir_node *op1, ir_node *op2)
1243 ir_graph *rem = current_ir_graph;
1245 current_ir_graph = irg;
1246 res = new_bd_Div (db, block, memop, op1, op2);
1247 current_ir_graph =rem;
1253 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1254 ir_node *memop, ir_node *op1, ir_node *op2)
1257 ir_graph *rem = current_ir_graph;
1259 current_ir_graph = irg;
1260 res = new_bd_Mod(db, block, memop, op1, op2);
1261 current_ir_graph = rem;
1267 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1268 ir_node *op1, ir_node *op2, ir_mode *mode)
1271 ir_graph *rem = current_ir_graph;
1273 current_ir_graph = irg;
1274 res = new_bd_And(db, block, op1, op2, mode);
1275 current_ir_graph = rem;
1281 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1282 ir_node *op1, ir_node *op2, ir_mode *mode)
1285 ir_graph *rem = current_ir_graph;
1287 current_ir_graph = irg;
1288 res = new_bd_Or(db, block, op1, op2, mode);
1289 current_ir_graph = rem;
1295 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1296 ir_node *op1, ir_node *op2, ir_mode *mode)
1299 ir_graph *rem = current_ir_graph;
1301 current_ir_graph = irg;
1302 res = new_bd_Eor(db, block, op1, op2, mode);
1303 current_ir_graph = rem;
1309 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1310 ir_node *op, ir_mode *mode)
1313 ir_graph *rem = current_ir_graph;
1315 current_ir_graph = irg;
1316 res = new_bd_Not(db, block, op, mode);
1317 current_ir_graph = rem;
1323 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1324 ir_node *op, ir_node *k, ir_mode *mode)
1327 ir_graph *rem = current_ir_graph;
1329 current_ir_graph = irg;
1330 res = new_bd_Shl (db, block, op, k, mode);
1331 current_ir_graph = rem;
1337 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1338 ir_node *op, ir_node *k, ir_mode *mode)
1341 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Shr(db, block, op, k, mode);
1345 current_ir_graph = rem;
1351 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1352 ir_node *op, ir_node *k, ir_mode *mode)
1355 ir_graph *rem = current_ir_graph;
1357 current_ir_graph = irg;
1358 res = new_bd_Shrs(db, block, op, k, mode);
1359 current_ir_graph = rem;
1365 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1366 ir_node *op, ir_node *k, ir_mode *mode)
1369 ir_graph *rem = current_ir_graph;
1371 current_ir_graph = irg;
1372 res = new_bd_Rot(db, block, op, k, mode);
1373 current_ir_graph = rem;
1379 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1380 ir_node *op, ir_mode *mode)
1383 ir_graph *rem = current_ir_graph;
1385 current_ir_graph = irg;
1386 res = new_bd_Abs(db, block, op, mode);
1387 current_ir_graph = rem;
1393 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1394 ir_node *op1, ir_node *op2)
1397 ir_graph *rem = current_ir_graph;
1399 current_ir_graph = irg;
1400 res = new_bd_Cmp(db, block, op1, op2);
1401 current_ir_graph = rem;
1407 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1410 ir_graph *rem = current_ir_graph;
1412 current_ir_graph = irg;
1413 res = new_bd_Jmp(db, block);
1414 current_ir_graph = rem;
1420 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_IJmp(db, block, tgt);
1427 current_ir_graph = rem;
1433 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_Cond(db, block, c);
1440 current_ir_graph = rem;
1446 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1447 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1450 ir_graph *rem = current_ir_graph;
1452 current_ir_graph = irg;
1453 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1454 current_ir_graph = rem;
1460 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1461 ir_node *store, int arity, ir_node **in)
1464 ir_graph *rem = current_ir_graph;
1466 current_ir_graph = irg;
1467 res = new_bd_Return(db, block, store, arity, in);
1468 current_ir_graph = rem;
1474 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1475 ir_node *store, ir_node *adr, ir_mode *mode)
1478 ir_graph *rem = current_ir_graph;
1480 current_ir_graph = irg;
1481 res = new_bd_Load(db, block, store, adr, mode);
1482 current_ir_graph = rem;
1488 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1489 ir_node *store, ir_node *adr, ir_node *val)
1492 ir_graph *rem = current_ir_graph;
1494 current_ir_graph = irg;
1495 res = new_bd_Store(db, block, store, adr, val);
1496 current_ir_graph = rem;
1502 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1503 ir_node *size, ir_type *alloc_type, where_alloc where)
1506 ir_graph *rem = current_ir_graph;
1508 current_ir_graph = irg;
1509 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1510 current_ir_graph = rem;
1516 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1517 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1520 ir_graph *rem = current_ir_graph;
1522 current_ir_graph = irg;
1523 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1524 current_ir_graph = rem;
1530 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1531 ir_node *store, ir_node *objptr, entity *ent)
1534 ir_graph *rem = current_ir_graph;
1536 current_ir_graph = irg;
1537 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1538 current_ir_graph = rem;
1544 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1545 int arity, ir_node **in, entity *ent)
1548 ir_graph *rem = current_ir_graph;
1550 current_ir_graph = irg;
1551 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1552 current_ir_graph = rem;
1558 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1559 symconst_kind symkind, ir_type *tp)
1562 ir_graph *rem = current_ir_graph;
1564 current_ir_graph = irg;
1565 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1566 current_ir_graph = rem;
1572 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1573 symconst_kind symkind)
1575 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1579 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1581 symconst_symbol sym = {(ir_type *)symbol};
1582 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1585 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1586 symconst_symbol sym = {(ir_type *)symbol};
1587 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1590 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1591 symconst_symbol sym = {symbol};
1592 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1595 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1596 symconst_symbol sym = {symbol};
1597 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1601 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1604 ir_graph *rem = current_ir_graph;
1606 current_ir_graph = irg;
1607 res = new_bd_Sync(db, block, arity, in);
1608 current_ir_graph = rem;
1614 new_rd_Bad (ir_graph *irg)
1620 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1623 ir_graph *rem = current_ir_graph;
1625 current_ir_graph = irg;
1626 res = new_bd_Confirm(db, block, val, bound, cmp);
1627 current_ir_graph = rem;
1632 /* this function is often called with current_ir_graph unset */
1634 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1637 ir_graph *rem = current_ir_graph;
1639 current_ir_graph = irg;
1640 res = new_bd_Unknown(m);
1641 current_ir_graph = rem;
1647 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1650 ir_graph *rem = current_ir_graph;
1652 current_ir_graph = irg;
1653 res = new_bd_CallBegin(db, block, call);
1654 current_ir_graph = rem;
1660 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1664 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1666 IRN_VRFY_IRG(res, irg);
1671 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1675 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1676 irg->end_except = res;
1677 IRN_VRFY_IRG (res, irg);
/* Remaining new_rd_* constructors; same pattern as above: save
   current_ir_graph, switch to irg, delegate to new_bd_*, restore. */
1682 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1685 ir_graph *rem = current_ir_graph;
1687 current_ir_graph = irg;
1688 res = new_bd_Break(db, block);
1689 current_ir_graph = rem;
/* Filter: interprocedural analogue of Proj. */
1695 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1699 ir_graph *rem = current_ir_graph;
1701 current_ir_graph = irg;
1702 res = new_bd_Filter(db, block, arg, mode, proj);
1703 current_ir_graph = rem;
/* NoMem: placeholder for "no memory dependency" (body elided here). */
1709 new_rd_NoMem (ir_graph *irg) {
/* Mux: sel ? ir_true : ir_false. */
1714 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1715 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1718 ir_graph *rem = current_ir_graph;
1720 current_ir_graph = irg;
1721 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1722 current_ir_graph = rem;
/* CopyB: block copy of data_type from src to dst. */
1727 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1728 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1731 ir_graph *rem = current_ir_graph;
1733 current_ir_graph = irg;
1734 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1735 current_ir_graph = rem;
/* InstOf: dynamic type test of objptr against type. */
1741 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1742 ir_node *objptr, ir_type *type)
1745 ir_graph *rem = current_ir_graph;
1747 current_ir_graph = irg;
1748 res = new_bd_InstOf(db, block, store, objptr, type);
1749 current_ir_graph = rem;
/* Raise: throw the exception object obj. */
1755 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1758 ir_graph *rem = current_ir_graph;
1760 current_ir_graph = irg;
1761 res = new_bd_Raise(db, block, store, obj);
1762 current_ir_graph = rem;
/* Bound: checks lower <= idx < upper. */
1767 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1768 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1771 ir_graph *rem = current_ir_graph;
1773 current_ir_graph = irg;
1774 res = new_bd_Bound(db, block, store, idx, lower, upper);
1775 current_ir_graph = rem;
/* "Plain" graph constructors: for every node kind, new_r_X(irg, ...) is
   simply new_rd_X(NULL, irg, ...) -- the convenience wrapper that omits
   debug information. Closing braces are elided in this listing. */
1780 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1781 return new_rd_Block(NULL, irg, arity, in);
1783 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1784 return new_rd_Start(NULL, irg, block);
1786 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1787 return new_rd_End(NULL, irg, block);
1789 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1790 return new_rd_Jmp(NULL, irg, block);
1792 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1793 return new_rd_IJmp(NULL, irg, block, tgt);
1795 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1796 return new_rd_Cond(NULL, irg, block, c);
1798 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1799 ir_node *store, int arity, ir_node **in) {
1800 return new_rd_Return(NULL, irg, block, store, arity, in);
1802 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1803 ir_mode *mode, tarval *con) {
1804 return new_rd_Const(NULL, irg, block, mode, con);
1807 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1808 ir_mode *mode, long value) {
1809 return new_rd_Const_long(NULL, irg, block, mode, value);
1812 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1813 ir_mode *mode, tarval *con, ir_type *tp) {
1814 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1817 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1818 symconst_symbol value, symconst_kind symkind) {
1819 return new_rd_SymConst(NULL, irg, block, value, symkind);
1821 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1822 ir_node *objptr, int n_index, ir_node **index,
1824 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1826 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1827 ir_node *callee, int arity, ir_node **in,
1829 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1831 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1832 ir_node *op1, ir_node *op2, ir_mode *mode) {
1833 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1835 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1836 ir_node *op1, ir_node *op2, ir_mode *mode) {
1837 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1839 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1840 ir_node *op, ir_mode *mode) {
1841 return new_rd_Minus(NULL, irg, block, op, mode);
1843 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1844 ir_node *op1, ir_node *op2, ir_mode *mode) {
1845 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1847 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1848 ir_node *memop, ir_node *op1, ir_node *op2) {
1849 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1851 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1852 ir_node *memop, ir_node *op1, ir_node *op2) {
1853 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1855 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1856 ir_node *memop, ir_node *op1, ir_node *op2) {
1857 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1859 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1860 ir_node *memop, ir_node *op1, ir_node *op2) {
1861 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1863 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1864 ir_node *op, ir_mode *mode) {
1865 return new_rd_Abs(NULL, irg, block, op, mode);
1867 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1868 ir_node *op1, ir_node *op2, ir_mode *mode) {
1869 return new_rd_And(NULL, irg, block, op1, op2, mode);
1871 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1872 ir_node *op1, ir_node *op2, ir_mode *mode) {
1873 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1875 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1876 ir_node *op1, ir_node *op2, ir_mode *mode) {
1877 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1879 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1880 ir_node *op, ir_mode *mode) {
1881 return new_rd_Not(NULL, irg, block, op, mode);
1883 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1884 ir_node *op1, ir_node *op2) {
1885 return new_rd_Cmp(NULL, irg, block, op1, op2);
1887 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1888 ir_node *op, ir_node *k, ir_mode *mode) {
1889 return new_rd_Shl(NULL, irg, block, op, k, mode);
1891 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1892 ir_node *op, ir_node *k, ir_mode *mode) {
1893 return new_rd_Shr(NULL, irg, block, op, k, mode);
1895 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1896 ir_node *op, ir_node *k, ir_mode *mode) {
1897 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1899 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1900 ir_node *op, ir_node *k, ir_mode *mode) {
1901 return new_rd_Rot(NULL, irg, block, op, k, mode);
1903 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1904 ir_node *op, ir_mode *mode) {
1905 return new_rd_Conv(NULL, irg, block, op, mode);
1907 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1908 return new_rd_Cast(NULL, irg, block, op, to_tp);
1910 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1911 ir_node **in, ir_mode *mode) {
1912 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1914 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1915 ir_node *store, ir_node *adr, ir_mode *mode) {
1916 return new_rd_Load(NULL, irg, block, store, adr, mode);
1918 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1919 ir_node *store, ir_node *adr, ir_node *val) {
1920 return new_rd_Store(NULL, irg, block, store, adr, val);
1922 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1923 ir_node *size, ir_type *alloc_type, where_alloc where) {
1924 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1926 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1927 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1928 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1930 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1931 return new_rd_Sync(NULL, irg, block, arity, in);
1933 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1934 ir_mode *mode, long proj) {
1935 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1937 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1939 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1941 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1942 int arity, ir_node **in) {
1943 return new_rd_Tuple(NULL, irg, block, arity, in );
1945 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1946 ir_node *val, ir_mode *mode) {
1947 return new_rd_Id(NULL, irg, block, val, mode);
1949 ir_node *new_r_Bad (ir_graph *irg) {
1950 return new_rd_Bad(irg);
1952 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1953 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1955 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1956 return new_rd_Unknown(irg, m);
1958 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1959 return new_rd_CallBegin(NULL, irg, block, callee);
1961 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1962 return new_rd_EndReg(NULL, irg, block);
1964 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1965 return new_rd_EndExcept(NULL, irg, block);
1967 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1968 return new_rd_Break(NULL, irg, block);
1970 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1971 ir_mode *mode, long proj) {
1972 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1974 ir_node *new_r_NoMem (ir_graph *irg) {
1975 return new_rd_NoMem(irg);
1977 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1978 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1979 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1981 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1982 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1983 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1985 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1987 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1989 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1990 ir_node *store, ir_node *obj) {
1991 return new_rd_Raise(NULL, irg, block, store, obj);
1993 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1994 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1995 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1998 /** ********************/
1999 /** public interfaces */
2000 /** construction tools */
/* new_d_* constructors build nodes in current_ir_graph's current block. */
2004 * - create a new Start node in the current block
2006 * @return s - pointer to the created Start node
2011 new_d_Start (dbg_info *db)
2015 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
2016 op_Start, mode_T, 0, NULL);
2017 /* res->attr.start.irg = current_ir_graph; */
2019 res = optimize_node(res);
2020 IRN_VRFY_IRG(res, current_ir_graph);
/* End node: arity -1 requests a dynamic in-array for keep-alives. */
2025 new_d_End (dbg_info *db)
2028 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
2029 op_End, mode_X, -1, NULL);
2030 res = optimize_node(res);
2031 IRN_VRFY_IRG(res, current_ir_graph);
2036 /* Constructs a Block with a fixed number of predecessors.
2037 Does set current_block. Can be used with automatic Phi
2038 node construction. */
2040 new_d_Block (dbg_info *db, int arity, ir_node **in)
2044 int has_unknown = 0;
2046 res = new_bd_Block(db, arity, in);
2048 /* Create and initialize array for Phi-node construction. */
2049 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr holds the last definition of each local value in this block. */
2050 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2051 current_ir_graph->n_loc);
2052 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes (loop body elided in this listing). */
2055 for (i = arity-1; i >= 0; i--)
2056 if (get_irn_op(in[i]) == op_Unknown) {
/* Do not optimize blocks with Unknown predecessors; they change later. */
2061 if (!has_unknown) res = optimize_node(res);
2062 current_ir_graph->current_block = res;
2064 IRN_VRFY_IRG(res, current_ir_graph);
2069 /* ***********************************************************************/
2070 /* Methods necessary for automatic Phi node creation */
2072 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2073 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2074 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2075 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2077 Call Graph: ( A ---> B == A "calls" B)
2079 get_value mature_immBlock
2087 get_r_value_internal |
2091 new_rd_Phi0 new_rd_Phi_in
2093 * *************************************************************************** */
2095 /** Creates a Phi node with 0 predecessors */
2096 static INLINE ir_node *
2097 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
/* A Phi0 is a placeholder created while the block is immature; it is
   upgraded to a real Phi when the block is matured. */
2101 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2102 IRN_VRFY_IRG(res, irg);
2106 /* There are two implementations of the Phi node construction. The first
2107 is faster, but does not work for blocks with more than 2 predecessors.
2108 The second works always but is slower and causes more unnecessary Phi
2110 Select the implementations by the following preprocessor flag set in
2112 #if USE_FAST_PHI_CONSTRUCTION
2114 /* This is a stack used for allocating and deallocating nodes in
2115 new_rd_Phi_in. The original implementation used the obstack
2116 to model this stack, now it is explicit. This reduces side effects.
2118 #if USE_EXPLICIT_PHI_IN_STACK
/* Explicit Phi_in stack management (only under USE_EXPLICIT_PHI_IN_STACK):
   allocate a stack, free it, and push a Phi node back for reuse. */
2120 new_Phi_in_stack(void) {
2123 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2125 res->stack = NEW_ARR_F (ir_node *, 0);
2132 free_Phi_in_stack(Phi_in_stack *s) {
2133 DEL_ARR_F(s->stack);
/* Return a no-longer-needed Phi to the stack for later reuse. */
2137 free_to_Phi_in_stack(ir_node *phi) {
2138 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2139 current_ir_graph->Phi_in_stack->pos)
2140 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2142 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2144 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a reusable Phi node from the Phi_in_stack, or allocate a fresh one
   when the stack is exhausted. Only compiled under the (normally disabled)
   USE_FAST_PHI_CONSTRUCTION / USE_EXPLICIT_PHI_IN_STACK configuration. */
2147 static INLINE ir_node *
2148 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2149 int arity, ir_node **in) {
2151 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2152 int pos = current_ir_graph->Phi_in_stack->pos;
2156 /* We need to allocate a new node */
/* NOTE(review): `db` is not declared in this function's visible parameter
   list -- looks like bit-rotted code behind the disabled flag; confirm
   against the full source (elided lines may declare it). */
2157 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2158 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2160 /* reuse the old node and initialize it again. */
2163 assert (res->kind == k_ir_node);
2164 assert (res->op == op_Phi);
2168 assert (arity >= 0);
2169 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2170 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
/* in[0] is the block; copy the arity predecessors after it. */
2172 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2174 (current_ir_graph->Phi_in_stack->pos)--;
2180 /* Creates a Phi node with a given, fixed array **in of predecessors.
2181 If the Phi node is unnecessary, as the same value reaches the block
2182 through all control flow paths, it is eliminated and the value
2183 returned directly. This constructor is only intended for use in
2184 the automatic Phi node generation triggered by get_value or mature.
2185 The implementation is quite tricky and depends on the fact, that
2186 the nodes are allocated on a stack:
2187 The in array contains predecessors and NULLs. The NULLs appear,
2188 if get_r_value_internal, that computed the predecessors, reached
2189 the same block on two paths. In this case the same value reaches
2190 this block on both paths, there is no definition in between. We need
2191 not allocate a Phi where these paths merge, but we have to communicate
2192 this fact to the caller. This happens by returning a pointer to the
2193 node the caller _will_ allocate. (Yes, we predict the address. We can
2194 do so because the nodes are allocated on the obstack.) The caller then
2195 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant of new_rd_Phi_in; see the block comment above
   for the address-prediction trick this relies on. */
2198 static INLINE ir_node *
2199 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2202 ir_node *res, *known;
2204 /* Allocate a new node on the obstack. This can return a node to
2205 which some of the pointers in the in-array already point.
2206 Attention: the constructor copies the in array, i.e., the later
2207 changes to the array in this routine do not affect the
2208 constructed node! If the in array contains NULLs, there will be
2209 missing predecessors in the returned node. Is this a possible
2210 internal state of the Phi node generation? */
2211 #if USE_EXPLICIT_PHI_IN_STACK
2212 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2214 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2215 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2218 /* The in-array can contain NULLs. These were returned by
2219 get_r_value_internal if it reached the same block/definition on a
2220 second path. The NULLs are replaced by the node itself to
2221 simplify the test in the next loop. */
2222 for (i = 0; i < ins; ++i) {
2227 /* This loop checks whether the Phi has more than one predecessor.
2228 If so, it is a real Phi node and we break the loop. Else the Phi
2229 node merges the same definition on several paths and therefore is
2231 for (i = 0; i < ins; ++i) {
2232 if (in[i] == res || in[i] == known)
2241 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2243 #if USE_EXPLICIT_PHI_IN_STACK
2244 free_to_Phi_in_stack(res);
/* Not on the explicit stack: give the obstack memory back directly. */
2246 edges_node_deleted(res, current_ir_graph);
2247 obstack_free(current_ir_graph->obst, res);
2251 res = optimize_node (res);
2252 IRN_VRFY_IRG(res, irg);
2255 /* return the pointer to the Phi node. This node might be deallocated! */
2260 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2263 allocates and returns this node. The routine called to allocate the
2264 node might optimize it away and return a real value, or even a pointer
2265 to a deallocated Phi node on top of the obstack!
2266 This function is called with an in-array of proper size. **/
/* Fast-construction variant of phi_merge: collect the Phi operands from
   all predecessor blocks, then build (or fold away) the Phi node. */
2268 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2270 ir_node *prevBlock, *res;
2273 /* This loop goes to all predecessor blocks of the block the Phi node is in
2274 and there finds the operands of the Phi node by calling
2275 get_r_value_internal. */
2276 for (i = 1; i <= ins; ++i) {
2277 assert (block->in[i]);
2278 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2280 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2283 /* After collecting all predecessors into the array nin a new Phi node
2284 with these predecessors is created. This constructor contains an
2285 optimization: If all predecessors of the Phi node are identical it
2286 returns the only operand instead of a new Phi node. If the value
2287 passes two different control flow edges without being defined, and
2288 this is the second path treated, a pointer to the node that will be
2289 allocated for the first path (recursion) is returned. We already
2290 know the address of this node, as it is the next node to be allocated
2291 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2292 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2294 /* Now we know the value for "pos" and can enter it in the array with
2295 all known local variables. Attention: this might be a pointer to
2296 a node, that later will be allocated!!! See new_rd_Phi_in.
2297 If this is called in mature, after some set_value in the same block,
2298 the proper value must not be overwritten:
2300 get_value (makes Phi0, puts it into graph_arr)
2301 set_value (overwrites Phi0 in graph_arr)
2302 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2305 if (!block->attr.block.graph_arr[pos]) {
2306 block->attr.block.graph_arr[pos] = res;
2308 /* printf(" value already computed by %s\n",
2309 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2315 /* This function returns the last definition of a variable. In case
2316 this variable was last defined in a previous block, Phi nodes are
2317 inserted. If the part of the firm graph containing the definition
2318 is not yet constructed, a dummy Phi node is returned. */
/* Fast-construction variant of get_r_value_internal: return the last
   definition of local value `pos` in `block`, inserting Phi nodes where
   control flow merges. */
2320 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2323 /* There are 4 cases to treat.
2325 1. The block is not mature and we visit it the first time. We can not
2326 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2327 predecessors is returned. This node is added to the linked list (field
2328 "link") of the containing block to be completed when this block is
2329 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2332 2. The value is already known in this block, graph_arr[pos] is set and we
2333 visit the block the first time. We can return the value without
2334 creating any new nodes.
2336 3. The block is mature and we visit it the first time. A Phi node needs
2337 to be created (phi_merge). If the Phi is not needed, as all its
2338 operands are the same value reaching the block through different
2339 paths, it's optimized away and the value itself is returned.
2341 4. The block is mature, and we visit it the second time. Now two
2342 subcases are possible:
2343 * The value was computed completely the last time we were here. This
2344 is the case if there is no loop. We can return the proper value.
2345 * The recursion that visited this node and set the flag did not
2346 return yet. We are computing a value in a loop and need to
2347 break the recursion without knowing the result yet.
2348 @@@ strange case. Straight forward we would create a Phi before
2349 starting the computation of its predecessors. In this case we will
2350 find a Phi here in any case. The problem is that this implementation
2351 only creates a Phi after computing the predecessors, so that it is
2352 hard to compute self references of this Phi. @@@
2353 There is no simple check for the second subcase. Therefore we check
2354 for a second visit and treat all such cases as the second subcase.
2355 Anyways, the basic situation is the same: we reached a block
2356 on two paths without finding a definition of the value: No Phi
2357 nodes are needed on both paths.
2358 We return this information "Two paths, no Phi needed" by a very tricky
2359 implementation that relies on the fact that an obstack is a stack and
2360 will return a node with the same address on different allocations.
2361 Look also at phi_merge and new_rd_phi_in to understand this.
2362 @@@ Unfortunately this does not work, see testprogram
2363 three_cfpred_example.
2367 /* case 4 -- already visited. */
2368 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2370 /* visited the first time */
2371 set_irn_visited(block, get_irg_visited(current_ir_graph));
2373 /* Get the local valid value */
2374 res = block->attr.block.graph_arr[pos];
2376 /* case 2 -- If the value is actually computed, return it. */
2377 if (res) return res;
2379 if (block->attr.block.matured) { /* case 3 */
2381 /* The Phi has the same amount of ins as the corresponding block. */
2382 int ins = get_irn_arity(block);
2384 NEW_ARR_A (ir_node *, nin, ins);
2386 /* Phi merge collects the predecessors and then creates a node. */
2387 res = phi_merge (block, pos, mode, nin, ins);
2389 } else { /* case 1 */
2390 /* The block is not mature, we don't know how many in's are needed. A Phi
2391 with zero predecessors is created. Such a Phi node is called Phi0
2392 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2393 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2395 The Phi0 has to remember the pos of its internal value. If the real
2396 Phi is computed, pos is used to update the array with the local
2399 res = new_rd_Phi0 (current_ir_graph, block, mode);
2400 res->attr.phi0_pos = pos;
2401 res->link = block->link;
2405 /* If we get here, the frontend missed a use-before-definition error */
2408 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2409 assert (mode->code >= irm_F && mode->code <= irm_P);
2410 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2411 tarval_mode_null[mode->code]);
2414 /* The local valid value is available now. */
2415 block->attr.block.graph_arr[pos] = res;
2423 it starts the recursion. This causes an Id at the entry of
2424 every block that has no definition of the value! **/
2426 #if USE_EXPLICIT_PHI_IN_STACK
/* No-op stubs: the general construction path needs no explicit Phi stack. */
2428 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2429 void free_Phi_in_stack(Phi_in_stack *s) { }
/* General variant of new_rd_Phi_in: builds a Phi for the given predecessors,
   folding it away when all non-Bad predecessors are the same value.
   `phi0` is the placeholder Phi0 this Phi will replace (may be NULL). */
2432 static INLINE ir_node *
2433 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2434 ir_node **in, int ins, ir_node *phi0)
2437 ir_node *res, *known;
2439 /* Allocate a new node on the obstack. The allocation copies the in
2441 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2442 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2444 /* This loop checks whether the Phi has more than one predecessor.
2445 If so, it is a real Phi node and we break the loop. Else the
2446 Phi node merges the same definition on several paths and therefore
2447 is not needed. Don't consider Bad nodes! */
2449 for (i=0; i < ins; ++i)
2453 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2455 /* Optimize self referencing Phis: We can't detect them yet properly, as
2456 they still refer to the Phi0 they will replace. So replace right now. */
2457 if (phi0 && in[i] == phi0) in[i] = res;
2459 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2467 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Undo the speculative allocation of res on the obstack. */
2470 edges_node_deleted(res, current_ir_graph);
2471 obstack_free (current_ir_graph->obst, res);
2472 if (is_Phi(known)) {
2473 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2474 order, an enclosing Phi node may become superfluous. */
2475 res = optimize_in_place_2(known);
2477 exchange(known, res);
2483 /* A undefined value, e.g., in unreachable code. */
2487 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2488 IRN_VRFY_IRG(res, irg);
2489 /* Memory Phis in endless loops must be kept alive.
2490 As we can't distinguish these easily we keep all of them alive. */
2491 if ((res->op == op_Phi) && (mode == mode_M))
2492 add_End_keepalive(irg->end, res);
2499 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2501 #if PRECISE_EXC_CONTEXT
2503 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2505 /* Construct a new frag_array for node n.
2506 Copy the content from the current graph_arr of the corresponding block:
2507 this is the current state.
2508 Set ProjM(n) as current memory state.
2509 Further the last entry in frag_arr of current block points to n. This
2510 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Build the frag_arr for fragile op n: snapshot the current block's
   graph_arr, make n's memory Proj the current memory state, and link n
   into the block's chain of fragile ops (see comment above). */
2512 static INLINE ir_node ** new_frag_arr (ir_node *n)
2517 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2518 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2519 sizeof(ir_node *)*current_ir_graph->n_loc);
2521 /* turn off optimization before allocating Proj nodes, as res isn't
2523 opt = get_opt_optimize(); set_optimize(0);
2524 /* Here we rely on the fact that all frag ops have Memory as first result! */
2525 if (get_irn_op(n) == op_Call)
/* NOTE(review): the Call case uses the exception-path memory Proj;
   elided lines may explain the choice -- confirm against full source. */
2526 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2528 assert((pn_Quot_M == pn_DivMod_M) &&
2529 (pn_Quot_M == pn_Div_M) &&
2530 (pn_Quot_M == pn_Mod_M) &&
2531 (pn_Quot_M == pn_Load_M) &&
2532 (pn_Quot_M == pn_Store_M) &&
2533 (pn_Quot_M == pn_Alloc_M) );
2534 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the block's last fragile op (slot n_loc-1 is reserved). */
2538 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2543 * returns the frag_arr from a node
2545 static INLINE ir_node **
2546 get_frag_arr (ir_node *n) {
/* Dispatch on opcode to find where the frag_arr lives in the attr union.
   The case labels are elided in this listing. */
2547 switch (get_irn_opcode(n)) {
2549 return n->attr.call.exc.frag_arr;
2551 return n->attr.a.exc.frag_arr;
2553 return n->attr.load.exc.frag_arr;
2555 return n->attr.store.exc.frag_arr;
2557 return n->attr.except.frag_arr;
/* Propagate val into slot pos of frag_arr and of every later fragile op's
   frag_arr in the chain, without overwriting existing entries.
   NOTE(review): two variants are visible here -- a recursive one and a
   bounded-iteration one; the preprocessor lines selecting between them
   are elided in this listing. */
2562 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2564 if (!frag_arr[pos]) frag_arr[pos] = val;
2565 if (frag_arr[current_ir_graph->n_loc - 1]) {
2566 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2567 assert(arr != frag_arr && "Endless recursion detected");
2568 set_frag_value(arr, pos, val);
/* Iterative variant: walk the chain with a hard bound as a safety net. */
2573 for (i = 0; i < 1000; ++i) {
2574 if (!frag_arr[pos]) {
2575 frag_arr[pos] = val;
2577 if (frag_arr[current_ir_graph->n_loc - 1]) {
2578 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2584 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but for the state after fragile op cfOp:
   consult cfOp's frag_arr first, falling back to the block's values. */
2589 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2593 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2595 frag_arr = get_frag_arr(cfOp);
2596 res = frag_arr[pos];
/* Elided condition: the following runs only when res is not yet set. */
2598 if (block->attr.block.graph_arr[pos]) {
2599 /* There was a set_value after the cfOp and no get_value before that
2600 set_value. We must build a Phi node now. */
2601 if (block->attr.block.matured) {
2602 int ins = get_irn_arity(block);
2604 NEW_ARR_A (ir_node *, nin, ins);
2605 res = phi_merge(block, pos, mode, nin, ins);
/* Block immature: fall back to a Phi0 placeholder, as elsewhere. */
2607 res = new_rd_Phi0 (current_ir_graph, block, mode);
2608 res->attr.phi0_pos = pos;
2609 res->link = block->link;
2613 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2614 but this should be better: (remove comment if this works) */
2615 /* It's a Phi, we can write this into all graph_arrs with NULL */
2616 set_frag_value(block->attr.block.graph_arr, pos, res);
2618 res = get_r_value_internal(block, pos, mode);
2619 set_frag_value(block->attr.block.graph_arr, pos, res);
2627 computes the predecessors for the real phi node, and then
2628 allocates and returns this node. The routine called to allocate the
2629 node might optimize it away and return a real value.
2630 This function must be called with an in-array of proper size. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr as there a later value is remembered. */
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is no good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language.  Sadly the only help we can give
         here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp).  In this case we call the function needlessly, eventually
         generating a non existent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
         before reaching this point. */
      if (default_initialize_local_variable) {
        ir_node *rem = get_cur_block();

        /* The language-dependent callback may build nodes, so make the
           queried block the current one while it runs. */
        set_cur_block(block);
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
        /* No callback registered: fall back to an undefined constant. */
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
      /* Break recursion: record a 0-ary Phi placeholder for this slot. */
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context()) {
        /* Set graph_arr for fragile ops.  Also here we should break recursion.
           We could choose a cyclic path through an cfop.  But the recursion would
           break at some point. */
        set_frag_value(block->attr.block.graph_arr, pos, phi0);
  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal.  Note: block->in[] is 1-based for control
     predecessors, nin[] is 0-based. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);
    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
      nin[i-1] = new_Bad();

  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal.  In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */
  phi0_all = block->attr.block.graph_arr[pos];
  /* Only pass phi0_all on if it really is a 0-ary Phi of this block. */
  if (!((get_irn_op(phi0_all) == op_Phi) &&
        (get_irn_arity(phi0_all) == 0) &&
        (get_nodes_block(phi0_all) == block)))

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
  exchange(phi0, res);
  block->attr.block.graph_arr[pos] = res;
  /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
     only an optimization. */
/* This function returns the last definition of a variable.  In case
   this variable was last defined in a previous block, Phi nodes are
   inserted.  If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured.  (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all it's
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion.  This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0.
  */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined.  Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) { return res; };

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node.  The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of it's internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  /* Only data modes can be zero-initialized here (asserted). */
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
                      get_mode_null(mode));

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
2835 #endif /* USE_FAST_PHI_CONSTRUCTION */
2837 /* ************************************************************************** */
/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 */
mature_immBlock (ir_node *block)
  assert (get_irn_opcode(block) == iro_Block);
  /* @@@ should be commented in
     assert (!get_Block_matured(block) && "Block already matured"); */

  if (!get_Block_matured(block)) {
    /* -1: the in-array's slot 0 is not a control predecessor. */
    ins = ARR_LEN (block->in)-1;
    /* Fix block parameters */
    block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

    /* An array for building the Phi nodes. */
    NEW_ARR_A (ir_node *, nin, ins);

    /* Traverse a chain of Phi nodes attached to this block and mature
       each placeholder. */
    for (n = block->link; n; n=next) {
      inc_irg_visited(current_ir_graph);
      /* Replace the 0-ary Phi0 by the real Phi built from the now
         complete predecessor set. */
      exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));

    block->attr.block.matured = 1;

    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack.  Therefore we have to call
       the in-place variant.
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call _2, as global cse has no effect on blocks. */
    block = optimize_in_place_2(block);
    IRN_VRFY_IRG(block, current_ir_graph);
/* ---- new_d_* constructors: build a node with debug info `db` in the
   current block of current_ir_graph by delegating to new_bd_*. ---- */

new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
  return new_bd_Phi(db, current_ir_graph->current_block,

new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
  /* Note: Const nodes are placed in the start block, not the current block. */
  return new_bd_Const(db, current_ir_graph->start_block,

new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
  /* Placed in the start block as well. */
  return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);

new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
  return new_bd_Const_type(db, current_ir_graph->start_block,

new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
  return new_bd_Id(db, current_ir_graph->current_block,

new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
  return new_bd_Proj(db, current_ir_graph->current_block,

new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
  /* Only valid on Cond nodes (asserted); records max_proj as the
     default projection number before creating the Proj. */
  assert(arg->op == op_Cond);
  arg->attr.c.kind = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_Proj (arg, mode_X, max_proj);

new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
  return new_bd_Conv(db, current_ir_graph->current_block,

new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
  return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);

new_d_Tuple (dbg_info *db, int arity, ir_node **in)
  return new_bd_Tuple(db, current_ir_graph->current_block,

new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_bd_Add(db, current_ir_graph->current_block,

new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_bd_Sub(db, current_ir_graph->current_block,

new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
  return new_bd_Minus(db, current_ir_graph->current_block,

new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_bd_Mul(db, current_ir_graph->current_block,
/*
 * Allocate the frag array for a fragile op.
 * Only done while the graph is still being built, only if precise
 * exception context is enabled, and only if the array is not set yet.
 */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
  if (get_opt_precise_exc_context()) {
    if ((current_ir_graph->phase_state == phase_building) &&
        (get_irn_op(res) == op) && /* Could be optimized away. */
        !*frag_store) /* Could be a cse where the arr is already set. */ {
      *frag_store = new_frag_arr(res);
/* Division-family constructors.  These nodes may raise exceptions:
   they are created pinned and, with precise exception context, get a
   frag array attached via allocate_frag_arr. */

new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_bd_Quot (db, current_ir_graph->current_block,
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */

new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_bd_DivMod (db, current_ir_graph->current_block,
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */

new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_bd_Div (db, current_ir_graph->current_block,
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */

new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_bd_Mod (db, current_ir_graph->current_block,
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Bitwise, shift, comparison and control-flow constructors: thin
   delegations to new_bd_* in the current block. */

new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_bd_And (db, current_ir_graph->current_block,

new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_bd_Or (db, current_ir_graph->current_block,

new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_bd_Eor (db, current_ir_graph->current_block,

new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
  return new_bd_Not (db, current_ir_graph->current_block,

new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_bd_Shl (db, current_ir_graph->current_block,

new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_bd_Shr (db, current_ir_graph->current_block,

new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_bd_Shrs (db, current_ir_graph->current_block,

new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_bd_Rot (db, current_ir_graph->current_block,

new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
  return new_bd_Abs (db, current_ir_graph->current_block,

new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
  return new_bd_Cmp (db, current_ir_graph->current_block,

new_d_Jmp (dbg_info *db)
  return new_bd_Jmp (db, current_ir_graph->current_block);

new_d_IJmp (dbg_info *db, ir_node *tgt)
  return new_bd_IJmp (db, current_ir_graph->current_block, tgt);

new_d_Cond (dbg_info *db, ir_node *c)
  return new_bd_Cond (db, current_ir_graph->current_block, c);
/* Memory, call and object constructors.  The fragile ones (Call, Load,
   Store, Alloc, CopyB, Bound) additionally get a frag array when
   precise exception context is enabled. */

new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
  res = new_bd_Call (db, current_ir_graph->current_block,
                     store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */

new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
  return new_bd_Return (db, current_ir_graph->current_block,

new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
  res = new_bd_Load (db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */

new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
  res = new_bd_Store (db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */

new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
  res = new_bd_Alloc (db, current_ir_graph->current_block,
                      store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */

new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
            ir_node *size, ir_type *free_type, where_alloc where)
  return new_bd_Free (db, current_ir_graph->current_block,
                      store, ptr, size, free_type, where);

new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
  /* A Sel with no explicit index operands. */
  return new_bd_Sel (db, current_ir_graph->current_block,
                     store, objptr, 0, NULL, ent);

new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
  return new_bd_Sel (db, current_ir_graph->current_block,
                     store, objptr, n_index, index, sel);

new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
  /* SymConst nodes are placed in the start block. */
  return new_bd_SymConst_type (db, current_ir_graph->start_block,

new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
  return new_bd_SymConst (db, current_ir_graph->start_block,

new_d_Sync (dbg_info *db, int arity, ir_node** in)
  return new_bd_Sync (db, current_ir_graph->current_block,

  /* NOTE(review): presumably the body of new_d_Bad -- signature elided. */
  return _new_d_Bad();

new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
  return new_bd_Confirm (db, current_ir_graph->current_block,

new_d_Unknown (ir_mode *m)
  return new_bd_Unknown(m);

new_d_CallBegin (dbg_info *db, ir_node *call)
  res = new_bd_CallBegin (db, current_ir_graph->current_block, call);

new_d_EndReg (dbg_info *db)
  res = new_bd_EndReg(db, current_ir_graph->current_block);

new_d_EndExcept (dbg_info *db)
  res = new_bd_EndExcept(db, current_ir_graph->current_block);

new_d_Break (dbg_info *db)
  return new_bd_Break (db, current_ir_graph->current_block);

new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
  return new_bd_Filter (db, current_ir_graph->current_block,

  /* NOTE(review): presumably the body of new_d_NoMem -- signature elided. */
  return _new_d_NoMem();

new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
           ir_node *ir_true, ir_mode *mode) {
  return new_bd_Mux (db, current_ir_graph->current_block,
                     sel, ir_false, ir_true, mode);

ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
                     ir_node *dst, ir_node *src, ir_type *data_type) {
  res = new_bd_CopyB(db, current_ir_graph->current_block,
                     store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);

new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
  return new_bd_InstOf (db, current_ir_graph->current_block,
                        store, objptr, type);

new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
  return new_bd_Raise (db, current_ir_graph->current_block,

ir_node *new_d_Bound(dbg_info *db,ir_node *store,
                     ir_node *idx, ir_node *lower, ir_node *upper) {
  res = new_bd_Bound(db, current_ir_graph->current_block,
                     store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Also uses constructors of the ?? interface, except new_Block.)       */
/* ********************************************************************* */
/* Block construction */
/* An immature Block without predecessors; edges are added later with
   add_immBlock_pred and the block is closed by mature_immBlock. */
ir_node *new_d_immBlock (dbg_info *db) {
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  /* The new block becomes the current block of the graph. */
  current_ir_graph->current_block = res;
  res->attr.block.matured = 0;
  res->attr.block.dead = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg = current_ir_graph;
  res->attr.block.backedge = NULL;
  res->attr.block.in_cg = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                         current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper: immature block with NULL debug info. */
new_immBlock (void) {
  return new_d_immBlock(NULL);

/* add an edge to a jmp/control flow node; only legal on blocks that
   have not been matured yet (asserted) */
add_immBlock_pred (ir_node *block, ir_node *jmp)
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  assert(jmp != NULL);
  ARR_APP1(ir_node *, block->in, jmp);

/* changing the current block */
set_cur_block (ir_node *target) {
  current_ir_graph->current_block = target;
/* ************************ */
/* parameter administration */

/* get a value from the parameter array from the current block by its index */
get_d_value (dbg_info *db, int pos, ir_mode *mode)
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);
  /* pos + 1: slot 0 of graph_arr holds the memory/store value
     (see get_store/set_store below). */
  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);

/* get a value from the parameter array from the current block by its index */
get_value (int pos, ir_mode *mode)
  return get_d_value(NULL, pos, mode);

/* set a value at position pos in the parameter array from the current block */
set_value (int pos, ir_node *value)
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;

/* get the current store */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  /* Slot 0 is reserved for the memory state (mode_M). */
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);

/* set the current store */
set_store (ir_node *store)
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;

/* Register ka as a keep-alive edge on the graph's End node. */
keep_alive (ir_node *ka) {
  add_End_keepalive(current_ir_graph->end, ka);
/* --- Useful access routines --- */
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block. */
ir_node *get_cur_block(void) {
  return get_irg_current_block(current_ir_graph);

/* Returns the frame type of the current graph */
ir_type *get_cur_frame_type(void) {
  return get_irg_frame_type(current_ir_graph);
/* ********************************************************************* */
/* initialization of the construction interface                          */

/* call once for each run of the library */
init_cons(uninitialized_local_variable_func_t *func)
  /* Remember the language-dependent callback used when an undefined
     local variable is read (see phi_merge). */
  default_initialize_local_variable = func;

/* call for each graph */
irg_finalize_cons (ir_graph *irg) {
  /* Construction is done: leave phase_building. */
  irg->phase_state = phase_high;
3496 irp_finalize_cons (void) {
3497 int i, n_irgs = get_irp_n_irgs();
3498 for (i = 0; i < n_irgs; i++) {
3499 irg_finalize_cons(get_irp_irg(i));
3501 irp->phase_state = phase_high;\
/* Convenience constructors without debug information: each simply
   delegates to its new_d_* counterpart with a NULL dbg_info. */

ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
ir_node *new_Start (void) {
  return new_d_Start(NULL);
ir_node *new_End (void) {
  return new_d_End(NULL);
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
ir_node *new_IJmp (ir_node *tgt) {
  return new_d_IJmp(NULL, tgt);
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
ir_node *new_Const_long(ir_mode *mode, long value)
  return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
ir_node *new_Const_type(tarval *con, ir_type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
  return new_d_Call(NULL, store, callee, arity, in, tp);
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   ir_type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
ir_node *new_Sync (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
ir_node *new_Bad (void) {
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
ir_node *new_Break (void) {
  return new_d_Break(NULL);
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
ir_node *new_NoMem (void) {
  return new_d_NoMem();
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
  return new_d_CopyB(NULL, store, dst, src, data_type);
ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
  return new_d_Bound(NULL, store, idx, lower, upper);