3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45    Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
/* NOTE(review): the two IRN_VRFY_IRG definitions below are the no-op and the
   checking variant; the #if/#else selecting between them is elided in this
   excerpt — confirm only one is active per build configuration. */
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61  * Language dependent variable initialization callback.
/* Frontend-supplied hook; NULL means no language-specific initialization of
   uninitialized local variables is performed. */
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67    Does not set current_block. Can not be used with automatic
68    Phi node construction. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
/* The block is created with a NULL block-of-block; it is immediately marked
   mature since its predecessor count is fixed at construction time. */
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
/* Initialize block attributes: not dead, owning graph, fresh backedge
   bitfield, no interprocedural (call-graph) predecessors yet. */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
/* Constructs the Start node of the graph; mode_T tuple, no predecessors. */
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
/* Constructs the End node. Arity -1: the predecessor array can grow later
   (keep-alive edges are appended via add_End_keepalive, see new_bd_Phi). */
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118    is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
127 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's arity. */
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown inputs; a Phi with Unknown operands is not
   handed to the local optimizer (has_unknown flag, setting elided here). */
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146    As we can't distinguish these easily we keep all of them alive. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
/* Constructs a Const node with an explicit type.
   NOTE(review): the 'block' parameter is ignored — the Const is always
   placed in the graph's start block (see the start_block argument below). */
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp);  /* Call method because of complex assertion. */
161 res = optimize_node (res);
/* optimize_node may CSE to an existing Const; it must carry the same type. */
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
/* Const with unknown type.
   NOTE(review): delegates to the new_rd_ variant (which saves/restores
   current_ir_graph) even though this is a bd constructor — harmless but
   inconsistent with the other new_bd_* functions; confirm intentional. */
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience: Const from a host 'long', converted via new_tarval_from_long. */
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Constructs an Id node (transparent copy of val in the given mode). */
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting component 'proj' of the tuple 'arg'. */
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
/* Sanity: the projected predecessor and its block must exist. */
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond. Mutates the Cond: marks it as
   fragmentary and records max_proj as its default projection number. */
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts 'op' to the given mode. */
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast node: reinterprets 'op' as type to_tp, keeping its mode.
   Only atomic target types are allowed. */
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple node from 'arity' inputs (mode_T). */
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
/* Binary/unary arithmetic constructors. All follow the same pattern:
   fill a local in[] array (elided in this excerpt), create the node in
   current_ir_graph, run the local optimizer, verify.
   Quot/DivMod/Div/Mod additionally take a memory operand and produce a
   mode_T tuple (memory, possible exception, result(s)). */
271 new_bd_Add (dbg_info *db, ir_node *block,
272         ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288         ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
304 new_bd_Minus (dbg_info *db, ir_node *block,
305         ir_node *op, ir_mode *mode)
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
311 res = optimize_node(res);
312 IRN_VRFY_IRG(res, irg);
317 new_bd_Mul (dbg_info *db, ir_node *block,
318         ir_node *op1, ir_node *op2, ir_mode *mode)
322 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
327 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
333 new_bd_Quot (dbg_info *db, ir_node *block,
334         ir_node *memop, ir_node *op1, ir_node *op2)
338 ir_graph *irg = current_ir_graph;
343 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
344 res = optimize_node(res);
345 IRN_VRFY_IRG(res, irg);
350 new_bd_DivMod (dbg_info *db, ir_node *block,
351         ir_node *memop, ir_node *op1, ir_node *op2)
355 ir_graph *irg = current_ir_graph;
360 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Div (dbg_info *db, ir_node *block,
368         ir_node *memop, ir_node *op1, ir_node *op2)
372 ir_graph *irg = current_ir_graph;
377 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
378 res = optimize_node(res);
379 IRN_VRFY_IRG(res, irg);
384 new_bd_Mod (dbg_info *db, ir_node *block,
385         ir_node *memop, ir_node *op1, ir_node *op2)
389 ir_graph *irg = current_ir_graph;
394 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
395 res = optimize_node(res);
396 IRN_VRFY_IRG(res, irg);
/* Bitwise / shift / unary constructors; same create-optimize-verify pattern
   as the arithmetic constructors above. Cmp yields a mode_T tuple of the
   individual pn_Cmp projections. */
401 new_bd_And (dbg_info *db, ir_node *block,
402         ir_node *op1, ir_node *op2, ir_mode *mode)
406 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_Or (dbg_info *db, ir_node *block,
418         ir_node *op1, ir_node *op2, ir_mode *mode)
422 ir_graph *irg = current_ir_graph;
426 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
427 res = optimize_node(res);
428 IRN_VRFY_IRG(res, irg);
433 new_bd_Eor (dbg_info *db, ir_node *block,
434         ir_node *op1, ir_node *op2, ir_mode *mode)
438 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
443 res = optimize_node (res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_Not    (dbg_info *db, ir_node *block,
450         ir_node *op, ir_mode *mode)
453 ir_graph *irg = current_ir_graph;
455 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_bd_Shl (dbg_info *db, ir_node *block,
463         ir_node *op, ir_node *k, ir_mode *mode)
467 ir_graph *irg = current_ir_graph;
471 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Shr (dbg_info *db, ir_node *block,
479        ir_node *op, ir_node *k, ir_mode *mode)
483 ir_graph *irg = current_ir_graph;
487 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_bd_Shrs (dbg_info *db, ir_node *block,
495        ir_node *op, ir_node *k, ir_mode *mode)
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
504 res = optimize_node(res);
505 IRN_VRFY_IRG(res, irg);
510 new_bd_Rot (dbg_info *db, ir_node *block,
511        ir_node *op, ir_node *k, ir_mode *mode)
515 ir_graph *irg = current_ir_graph;
519 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
520 res = optimize_node(res);
521 IRN_VRFY_IRG(res, irg);
526 new_bd_Abs (dbg_info *db, ir_node *block,
527        ir_node *op, ir_mode *mode)
530 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
533 res = optimize_node (res);
534 IRN_VRFY_IRG(res, irg);
539 new_bd_Cmp (dbg_info *db, ir_node *block,
540        ir_node *op1, ir_node *op2)
544 ir_graph *irg = current_ir_graph;
549 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (control flow, mode_X, no operands). */
556 new_bd_Jmp (dbg_info *db, ir_node *block)
559 ir_graph *irg = current_ir_graph;
561 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
562 res = optimize_node (res);
563 IRN_VRFY_IRG (res, irg);
/* Constructs an indirect jump to the address 'tgt'. */
568 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
571 ir_graph *irg = current_ir_graph;
573 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
574 res = optimize_node (res);
575 IRN_VRFY_IRG (res, irg);
/* The optimizer may have folded the IJmp to something else; the elided
   branch only applies when it is still an IJmp. */
577 if (get_irn_op(res) == op_IJmp)       /* still an IJmp */
/* Constructs a Cond node on selector 'c'; starts as a dense jump table with
   default projection 0 and no branch prediction. */
583 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
586 ir_graph *irg = current_ir_graph;
588 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
589 res->attr.c.kind = dense;
590 res->attr.c.default_proj = 0;
591 res->attr.c.pred = COND_JMP_PRED_NONE;
592 res = optimize_node (res);
593 IRN_VRFY_IRG(res, irg);
/* Constructs a Call node. The real in-array prepends memory and callee
   before the 'arity' actual arguments (hence r_arity = arity + 2, with the
   r_in[0]/r_in[1] assignments elided in this excerpt). */
598 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
599         ir_node *callee, int arity, ir_node **in, ir_type *tp)
604 ir_graph *irg = current_ir_graph;
607 NEW_ARR_A(ir_node *, r_in, r_arity);
610 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
612 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type placeholder). */
614 assert((get_unknown_type() == tp) || is_Method_type(tp));
615 set_Call_type(res, tp);
616 res->attr.call.exc.pin_state = op_pin_state_pinned;
617 res->attr.call.callee_arr   = NULL;
618 res = optimize_node(res);
619 IRN_VRFY_IRG(res, irg);
/* Constructs a Return node: memory at r_in[0], then the result values. */
624 new_bd_Return (dbg_info *db, ir_node *block,
625           ir_node *store, int arity, ir_node **in)
630 ir_graph *irg = current_ir_graph;
633 NEW_ARR_A (ir_node *, r_in, r_arity);
635 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
636 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
/* Constructs a Load of the given mode from address 'adr'; pinned,
   non-volatile by default; result is a mode_T tuple. */
643 new_bd_Load (dbg_info *db, ir_node *block,
644         ir_node *store, ir_node *adr, ir_mode *mode)
648 ir_graph *irg = current_ir_graph;
652 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
653 res->attr.load.exc.pin_state = op_pin_state_pinned;
654 res->attr.load.load_mode     = mode;
655 res->attr.load.volatility    = volatility_non_volatile;
656 res = optimize_node(res);
657 IRN_VRFY_IRG(res, irg);
/* Constructs a Store of 'val' to address 'adr'; pinned, non-volatile. */
662 new_bd_Store (dbg_info *db, ir_node *block,
663          ir_node *store, ir_node *adr, ir_node *val)
667 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
673 res->attr.store.exc.pin_state = op_pin_state_pinned;
674 res->attr.store.volatility    = volatility_non_volatile;
675 res = optimize_node(res);
676 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of 'size' bytes of alloc_type on stack or heap
   ('where'); pinned, mode_T result tuple. */
681 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
682          ir_node *size, ir_type *alloc_type, where_alloc where)
686 ir_graph *irg = current_ir_graph;
690 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
691 res->attr.a.exc.pin_state = op_pin_state_pinned;
692 res->attr.a.where         = where;
693 res->attr.a.type          = alloc_type;
694 res = optimize_node(res);
695 IRN_VRFY_IRG(res, irg);
/* Constructs a Free releasing 'size' bytes at 'ptr'; returns memory only
   (mode_M, not a tuple). */
700 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
701         ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
705 ir_graph *irg = current_ir_graph;
710 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
711 res->attr.f.where = where;
712 res->attr.f.type  = free_type;
713 res = optimize_node(res);
714 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel selecting entity 'ent' from 'objptr' with 'arity' array
   indices; r_in prepends memory and the object pointer (r_arity = arity+2). */
719 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
720        int arity, ir_node **in, entity *ent)
725 ir_graph *irg = current_ir_graph;
727 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
730 NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
733 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
735  * FIXME: Sel's can select functions which should be of mode mode_P_code.
737 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
738 res->attr.s.ent = ent;
739 res = optimize_node(res);
740 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst with explicit type. Address kinds get a data
   pointer mode; other kinds (sizes, tags) use a mode set in elided lines. */
745 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
746               symconst_kind symkind, ir_type *tp) {
749 ir_graph *irg = current_ir_graph;
751 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
752 mode = mode_P_data;   /* FIXME: can be mode_P_code */
756 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
758 res->attr.i.num = symkind;
759 res->attr.i.sym = value;
762 res = optimize_node(res);
763 IRN_VRFY_IRG(res, irg);
/* SymConst with unknown type; delegates to the new_rd_ typed variant. */
768 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
769          symconst_kind symkind)
771 ir_graph *irg = current_ir_graph;
773 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Constructs a Sync joining 'arity' memory values into one (mode_M). */
778 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
781 ir_graph *irg = current_ir_graph;
783 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
784 res = optimize_node(res);
785 IRN_VRFY_IRG(res, irg);
/* Constructs a Confirm: asserts that 'val' relates to 'bound' by 'cmp';
   result has the mode of 'val'. */
790 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
792 ir_node *in[2], *res;
793 ir_graph *irg = current_ir_graph;
797 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
798 res->attr.confirm_cmp = cmp;
799 res = optimize_node (res);
800 IRN_VRFY_IRG(res, irg);
/* Constructs an Unknown value of mode m, always placed in the start block.
   NOTE(review): unlike its siblings this constructor does not call
   IRN_VRFY_IRG — presumably because current_ir_graph may be unset per the
   comment below; confirm. */
804 /* this function is often called with current_ir_graph unset */
806 new_bd_Unknown (ir_mode *m)
809 ir_graph *irg = current_ir_graph;
811 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
812 res = optimize_node(res);
/* Interprocedural-view constructors (CallBegin/EndReg/EndExcept/Break/
   Filter). */
/* CallBegin marks the interprocedural start of 'call'; its single input is
   the call's function pointer. */
817 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
821 ir_graph *irg = current_ir_graph;
823 in[0] = get_Call_ptr(call);
824 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
825 /* res->attr.callbegin.irg = irg; */
826 res->attr.callbegin.call = call;
827 res = optimize_node(res);
828 IRN_VRFY_IRG(res, irg);
/* EndReg: regular interprocedural end; arity -1 (grows later), no
   optimization. */
833 new_bd_EndReg (dbg_info *db, ir_node *block)
836 ir_graph *irg = current_ir_graph;
838 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
840 IRN_VRFY_IRG(res, irg);
/* EndExcept: exceptional interprocedural end; recorded on the graph. */
845 new_bd_EndExcept (dbg_info *db, ir_node *block)
848 ir_graph *irg = current_ir_graph;
850 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
851 irg->end_except = res;
852 IRN_VRFY_IRG (res, irg);
/* Break: control flow jump used in the interprocedural view. */
857 new_bd_Break (dbg_info *db, ir_node *block)
860 ir_graph *irg = current_ir_graph;
862 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
863 res = optimize_node(res);
864 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj; selects 'proj' of 'arg'. */
869 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
873 ir_graph *irg = current_ir_graph;
875 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
876 res->attr.filter.proj = proj;
877 res->attr.filter.in_cg = NULL;
878 res->attr.filter.backedge = NULL;
881 assert(get_Proj_pred(res));
882 assert(get_nodes_block(get_Proj_pred(res)));
884 res = optimize_node(res);
885 IRN_VRFY_IRG(res, irg);
/* Constructs a Mux: result is ir_false or ir_true depending on 'sel'. */
890 new_bd_Mux  (dbg_info *db, ir_node *block,
891     ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
895 ir_graph *irg = current_ir_graph;
901 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
904 res = optimize_node(res);
905 IRN_VRFY_IRG(res, irg);
/* Constructs a CopyB: block copy of 'data_type' from src to dst; pinned,
   mode_T result tuple. */
910 new_bd_CopyB  (dbg_info *db, ir_node *block,
911     ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
915 ir_graph *irg = current_ir_graph;
921 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
923 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
924 res->attr.copyb.data_type     = data_type;
925 res = optimize_node(res);
926 IRN_VRFY_IRG(res, irg);
931 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
932 ir_node *objptr, ir_type *type)
936 ir_graph *irg = current_ir_graph;
940 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
941 res->attr.io.type = type;
942 res = optimize_node(res);
943 IRN_VRFY_IRG(res, irg);
/* Constructs a Raise: raises exception object 'obj'; mode_T tuple. */
948 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
952 ir_graph *irg = current_ir_graph;
956 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
957 res = optimize_node(res);
958 IRN_VRFY_IRG(res, irg);
/* Constructs a Bound: checks lower <= idx < upper; pinned, mode_T tuple.
   NOTE(review): the pin state is written through attr.copyb — likely a
   copy-paste from the CopyB constructor above; check whether a dedicated
   attr.bound exists in irnode_t.h and should be used instead. */
963 new_bd_Bound (dbg_info *db, ir_node *block,
964     ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
968 ir_graph *irg = current_ir_graph;
974 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
975 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
976 res = optimize_node(res);
977 IRN_VRFY_IRG(res, irg);
981 /* --------------------------------------------- */
982 /* private interfaces, for professional use only */
983 /* --------------------------------------------- */
985 /* Constructs a Block with a fixed number of predecessors.
986    Does not set current_block. Can not be used with automatic
987    Phi node construction. */
/* The new_rd_* wrappers all follow one pattern: save current_ir_graph,
   switch it to the target graph 'irg', delegate to the matching new_bd_
   constructor, then restore the caller's graph. */
989 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
991 ir_graph *rem = current_ir_graph;
994 current_ir_graph = irg;
995 res = new_bd_Block (db, arity, in);
996 current_ir_graph = rem;
/* Constructs a Start node in graph irg: switch graph, delegate, restore. */
1002 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
1004 ir_graph *rem = current_ir_graph;
1007 current_ir_graph = irg;
1008 res = new_bd_Start (db, block);
1009 current_ir_graph = rem;
1015 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
1018 ir_graph *rem = current_ir_graph;
1020 current_ir_graph = rem;
1021 res = new_bd_End (db, block);
1022 current_ir_graph = rem;
1027 /* Creates a Phi node with all predecessors. Calling this constructor
1028    is only allowed if the corresponding block is mature. */
/* Graph-switching wrapper around new_bd_Phi. */
1030 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
1033 ir_graph *rem = current_ir_graph;
1035 current_ir_graph = irg;
1036 res = new_bd_Phi (db, block,arity, in, mode);
1037 current_ir_graph = rem;
/* Graph-switching wrapper around new_bd_Const_type. */
1043 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
1046 ir_graph *rem = current_ir_graph;
1048 current_ir_graph = irg;
1049 res = new_bd_Const_type (db, block, mode, con, tp);
1050 current_ir_graph = rem;
/* Const with unknown type; delegates directly to the bd typed variant. */
1056 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1063 current_ir_graph = rem;
/* Const from a host long; no graph switch needed since new_rd_Const does it. */
1069 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1071 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Graph-switching wrappers (save current_ir_graph, set irg, delegate to
   new_bd_*, restore) for Id, Proj, defaultProj, Conv, Cast and Tuple. */
1075 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1078 ir_graph *rem = current_ir_graph;
1080 current_ir_graph = irg;
1081 res = new_bd_Id(db, block, val, mode);
1082 current_ir_graph = rem;
1088 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_Proj(db, block, arg, mode, proj);
1096 current_ir_graph = rem;
1102 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1106 ir_graph *rem = current_ir_graph;
1108 current_ir_graph = irg;
1109 res = new_bd_defaultProj(db, block, arg, max_proj);
1110 current_ir_graph = rem;
1116 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Conv(db, block, op, mode);
1123 current_ir_graph = rem;
1129 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1132 ir_graph *rem = current_ir_graph;
1134 current_ir_graph = irg;
1135 res = new_bd_Cast(db, block, op, to_tp);
1136 current_ir_graph = rem;
1142 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Tuple(db, block, arity, in);
1149 current_ir_graph = rem;
/* Graph-switching wrappers for the arithmetic constructors
   (Add/Sub/Minus/Mul/Quot/DivMod/Div/Mod). */
1155 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1156        ir_node *op1, ir_node *op2, ir_mode *mode)
1159 ir_graph *rem = current_ir_graph;
1161 current_ir_graph = irg;
1162 res = new_bd_Add(db, block, op1, op2, mode);
1163 current_ir_graph = rem;
1169 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1170        ir_node *op1, ir_node *op2, ir_mode *mode)
1173 ir_graph *rem = current_ir_graph;
1175 current_ir_graph = irg;
1176 res = new_bd_Sub(db, block, op1, op2, mode);
1177 current_ir_graph = rem;
1183 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1184          ir_node *op, ir_mode *mode)
1187 ir_graph *rem = current_ir_graph;
1189 current_ir_graph = irg;
1190 res = new_bd_Minus(db, block, op, mode);
1191 current_ir_graph = rem;
1197 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1198        ir_node *op1, ir_node *op2, ir_mode *mode)
1201 ir_graph *rem = current_ir_graph;
1203 current_ir_graph = irg;
1204 res = new_bd_Mul(db, block, op1, op2, mode);
1205 current_ir_graph = rem;
1211 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1212         ir_node *memop, ir_node *op1, ir_node *op2)
1215 ir_graph *rem = current_ir_graph;
1217 current_ir_graph = irg;
1218 res = new_bd_Quot(db, block, memop, op1, op2);
1219 current_ir_graph = rem;
1225 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1226           ir_node *memop, ir_node *op1, ir_node *op2)
1229 ir_graph *rem = current_ir_graph;
1231 current_ir_graph = irg;
1232 res = new_bd_DivMod(db, block, memop, op1, op2);
1233 current_ir_graph = rem;
1239 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1240        ir_node *memop, ir_node *op1, ir_node *op2)
1243 ir_graph *rem = current_ir_graph;
1245 current_ir_graph = irg;
1246 res = new_bd_Div (db, block, memop, op1, op2);
1247 current_ir_graph =rem;
1253 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1254        ir_node *memop, ir_node *op1, ir_node *op2)
1257 ir_graph *rem = current_ir_graph;
1259 current_ir_graph = irg;
1260 res = new_bd_Mod(db, block, memop, op1, op2);
1261 current_ir_graph = rem;
/* Graph-switching wrappers for the bitwise/shift/unary constructors
   (And/Or/Eor/Not/Shl/Shr/Shrs/Rot/Abs/Cmp). */
1267 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1268        ir_node *op1, ir_node *op2, ir_mode *mode)
1271 ir_graph *rem = current_ir_graph;
1273 current_ir_graph = irg;
1274 res = new_bd_And(db, block, op1, op2, mode);
1275 current_ir_graph = rem;
1281 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1282       ir_node *op1, ir_node *op2, ir_mode *mode)
1285 ir_graph *rem = current_ir_graph;
1287 current_ir_graph = irg;
1288 res = new_bd_Or(db, block, op1, op2, mode);
1289 current_ir_graph = rem;
1295 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1296        ir_node *op1, ir_node *op2, ir_mode *mode)
1299 ir_graph *rem = current_ir_graph;
1301 current_ir_graph = irg;
1302 res = new_bd_Eor(db, block, op1, op2, mode);
1303 current_ir_graph = rem;
1309 new_rd_Not    (dbg_info *db, ir_graph *irg, ir_node *block,
1310        ir_node *op, ir_mode *mode)
1313 ir_graph *rem = current_ir_graph;
1315 current_ir_graph = irg;
1316 res = new_bd_Not(db, block, op, mode);
1317 current_ir_graph = rem;
1323 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1324        ir_node *op, ir_node *k, ir_mode *mode)
1327 ir_graph *rem = current_ir_graph;
1329 current_ir_graph = irg;
1330 res = new_bd_Shl (db, block, op, k, mode);
1331 current_ir_graph = rem;
1337 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1338        ir_node *op, ir_node *k, ir_mode *mode)
1341 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Shr(db, block, op, k, mode);
1345 current_ir_graph = rem;
1351 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1352        ir_node *op, ir_node *k, ir_mode *mode)
1355 ir_graph *rem = current_ir_graph;
1357 current_ir_graph = irg;
1358 res = new_bd_Shrs(db, block, op, k, mode);
1359 current_ir_graph = rem;
1365 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1366        ir_node *op, ir_node *k, ir_mode *mode)
1369 ir_graph *rem = current_ir_graph;
1371 current_ir_graph = irg;
1372 res = new_bd_Rot(db, block, op, k, mode);
1373 current_ir_graph = rem;
1379 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1380        ir_node *op, ir_mode *mode)
1383 ir_graph *rem = current_ir_graph;
1385 current_ir_graph = irg;
1386 res = new_bd_Abs(db, block, op, mode);
1387 current_ir_graph = rem;
1393 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1394        ir_node *op1, ir_node *op2)
1397 ir_graph *rem = current_ir_graph;
1399 current_ir_graph = irg;
1400 res = new_bd_Cmp(db, block, op1, op2);
1401 current_ir_graph = rem;
/* Graph-switching wrappers for the control-flow constructors. */
1407 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1410 ir_graph *rem = current_ir_graph;
1412 current_ir_graph = irg;
1413 res = new_bd_Jmp(db, block);
1414 current_ir_graph = rem;
1420 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_IJmp(db, block, tgt);
1427 current_ir_graph = rem;
1433 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_Cond(db, block, c);
1440 current_ir_graph = rem;
/* Graph-switching wrappers for Call, Return, Load, Store, Alloc and Free. */
1446 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1447         ir_node *callee, int arity, ir_node **in, ir_type *tp)
1450 ir_graph *rem = current_ir_graph;
1452 current_ir_graph = irg;
1453 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1454 current_ir_graph = rem;
1460 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1461           ir_node *store, int arity, ir_node **in)
1464 ir_graph *rem = current_ir_graph;
1466 current_ir_graph = irg;
1467 res = new_bd_Return(db, block, store, arity, in);
1468 current_ir_graph = rem;
1474 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1475         ir_node *store, ir_node *adr, ir_mode *mode)
1478 ir_graph *rem = current_ir_graph;
1480 current_ir_graph = irg;
1481 res = new_bd_Load(db, block, store, adr, mode);
1482 current_ir_graph = rem;
1488 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1489          ir_node *store, ir_node *adr, ir_node *val)
1492 ir_graph *rem = current_ir_graph;
1494 current_ir_graph = irg;
1495 res = new_bd_Store(db, block, store, adr, val);
1496 current_ir_graph = rem;
1502 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1503          ir_node *size, ir_type *alloc_type, where_alloc where)
1506 ir_graph *rem = current_ir_graph;
1508 current_ir_graph = irg;
1509 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1510 current_ir_graph = rem;
1516 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1517         ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1520 ir_graph *rem = current_ir_graph;
1522 current_ir_graph = irg;
1523 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1524 current_ir_graph = rem;
/* simpleSel: Sel with no array indices (arity 0, NULL index array). */
1530 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1531                   ir_node *store, ir_node *objptr, entity *ent)
1534 ir_graph *rem = current_ir_graph;
1536 current_ir_graph = irg;
1537 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1538 current_ir_graph = rem;
/* Graph-switching wrapper for the general Sel constructor. */
1544 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1545        int arity, ir_node **in, entity *ent)
1548 ir_graph *rem = current_ir_graph;
1550 current_ir_graph = irg;
1551 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1552 current_ir_graph = rem;
/* Graph-switching wrapper for SymConst with explicit type. */
1558 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1559                  symconst_kind symkind, ir_type *tp)
1562 ir_graph *rem = current_ir_graph;
1564 current_ir_graph = irg;
1565 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1566 current_ir_graph = rem;
/* SymConst with unknown type; delegates to the typed rd variant. */
1572 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1573          symconst_kind symkind)
1575 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* SymConst convenience constructors, all placed in the start block.
   NOTE(review): the (ir_type *) casts below initialize the symconst_symbol
   union through its first member even when the payload is an entity* or
   ident* — presumably deliberate C89-style union initialization; confirm
   the union layout in firm headers before touching. */
1579 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1581 symconst_symbol sym = {(ir_type *)symbol};
1582 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1585 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1586 symconst_symbol sym = {(ir_type *)symbol};
1587 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1590 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1591 symconst_symbol sym = {symbol};
1592 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1595 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1596 symconst_symbol sym = {symbol};
1597 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* Graph-switching wrapper for Sync. */
1601 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1604 ir_graph *rem = current_ir_graph;
1606 current_ir_graph = irg;
1607 res = new_bd_Sync(db, block, arity, in);
1608 current_ir_graph = rem;
/* Returns the graph's singleton Bad node (body elided in this excerpt). */
1614 new_rd_Bad (ir_graph *irg)
/* Graph-switching wrapper for Confirm. */
1620 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1623 ir_graph *rem = current_ir_graph;
1625 current_ir_graph = irg;
1626 res = new_bd_Confirm(db, block, val, bound, cmp);
1627 current_ir_graph = rem;
/* Graph-switching wrapper for Unknown; note current_ir_graph may be unset
   in the caller, which is why the switch matters here. */
1632 /* this function is often called with current_ir_graph unset */
1634 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1637 ir_graph *rem = current_ir_graph;
1639 current_ir_graph = irg;
1640 res = new_bd_Unknown(m);
1641 current_ir_graph = rem;
/* Graph-switching wrappers for the interprocedural-view constructors.
   NOTE(review): new_rd_EndReg / new_rd_EndExcept below build the node
   directly instead of delegating to new_bd_EndReg / new_bd_EndExcept —
   duplicated logic; consider delegating for consistency. */
1647 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1650 ir_graph *rem = current_ir_graph;
1652 current_ir_graph = irg;
1653 res = new_bd_CallBegin(db, block, call);
1654 current_ir_graph = rem;
1660 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1664 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1666 IRN_VRFY_IRG(res, irg);
1671 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1675 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1676 irg->end_except = res;
1677 IRN_VRFY_IRG (res, irg);
1682 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1685 ir_graph *rem = current_ir_graph;
1687 current_ir_graph = irg;
1688 res = new_bd_Break(db, block);
1689 current_ir_graph = rem;
1695 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1699 ir_graph *rem = current_ir_graph;
1701 current_ir_graph = irg;
1702 res = new_bd_Filter(db, block, arg, mode, proj);
1703 current_ir_graph = rem;
/* Returns the graph's singleton NoMem node (body elided in this excerpt). */
1709 new_rd_NoMem (ir_graph *irg) {
/* Graph-switching wrapper for Mux. */
1714 new_rd_Mux  (dbg_info *db, ir_graph *irg, ir_node *block,
1715     ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1718 ir_graph *rem = current_ir_graph;
1720 current_ir_graph = irg;
1721 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1722 current_ir_graph = rem;
1727 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1728 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1731 ir_graph *rem = current_ir_graph;
1733 current_ir_graph = irg;
1734 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1735 current_ir_graph = rem;
1741 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1742 ir_node *objptr, ir_type *type)
1745 ir_graph *rem = current_ir_graph;
1747 current_ir_graph = irg;
1748 res = new_bd_InstOf(db, block, store, objptr, type);
1749 current_ir_graph = rem;
1755 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1758 ir_graph *rem = current_ir_graph;
1760 current_ir_graph = irg;
1761 res = new_bd_Raise(db, block, store, obj);
1762 current_ir_graph = rem;
1767 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1768 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1771 ir_graph *rem = current_ir_graph;
1773 current_ir_graph = irg;
1774 res = new_bd_Bound(db, block, store, idx, lower, upper);
1775 current_ir_graph = rem;
/* new_r_* constructor family: debug-info-free convenience wrappers.  Each
   simply forwards to the matching new_rd_* constructor with a NULL dbg_info;
   new_r_simpleSel additionally fixes the index array to (0, NULL). */
1780 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1781 return new_rd_Block(NULL, irg, arity, in);
1783 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1784 return new_rd_Start(NULL, irg, block);
1786 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1787 return new_rd_End(NULL, irg, block);
1789 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1790 return new_rd_Jmp(NULL, irg, block);
1792 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1793 return new_rd_IJmp(NULL, irg, block, tgt);
1795 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1796 return new_rd_Cond(NULL, irg, block, c);
1798 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1799 ir_node *store, int arity, ir_node **in) {
1800 return new_rd_Return(NULL, irg, block, store, arity, in);
1802 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1803 ir_mode *mode, tarval *con) {
1804 return new_rd_Const(NULL, irg, block, mode, con);
1806 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1807 ir_mode *mode, long value) {
1808 return new_rd_Const_long(NULL, irg, block, mode, value);
1810 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1811 ir_mode *mode, tarval *con, ir_type *tp) {
1812 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1814 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1815 symconst_symbol value, symconst_kind symkind) {
1816 return new_rd_SymConst(NULL, irg, block, value, symkind);
1818 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1819 ir_node *objptr, entity *ent) {
1820 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1822 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1823 ir_node *objptr, int n_index, ir_node **index,
1825 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1827 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1828 ir_node *callee, int arity, ir_node **in,
1830 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1832 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1833 ir_node *op1, ir_node *op2, ir_mode *mode) {
1834 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1836 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1837 ir_node *op1, ir_node *op2, ir_mode *mode) {
1838 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1840 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1841 ir_node *op, ir_mode *mode) {
1842 return new_rd_Minus(NULL, irg, block, op, mode);
1844 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1845 ir_node *op1, ir_node *op2, ir_mode *mode) {
1846 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1848 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1849 ir_node *memop, ir_node *op1, ir_node *op2) {
1850 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1852 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1853 ir_node *memop, ir_node *op1, ir_node *op2) {
1854 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1856 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1857 ir_node *memop, ir_node *op1, ir_node *op2) {
1858 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1860 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1861 ir_node *memop, ir_node *op1, ir_node *op2) {
1862 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1864 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1865 ir_node *op, ir_mode *mode) {
1866 return new_rd_Abs(NULL, irg, block, op, mode);
1868 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1869 ir_node *op1, ir_node *op2, ir_mode *mode) {
1870 return new_rd_And(NULL, irg, block, op1, op2, mode);
1872 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1873 ir_node *op1, ir_node *op2, ir_mode *mode) {
1874 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1876 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1877 ir_node *op1, ir_node *op2, ir_mode *mode) {
1878 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1880 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1881 ir_node *op, ir_mode *mode) {
1882 return new_rd_Not(NULL, irg, block, op, mode);
1884 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1885 ir_node *op1, ir_node *op2) {
1886 return new_rd_Cmp(NULL, irg, block, op1, op2);
1888 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1889 ir_node *op, ir_node *k, ir_mode *mode) {
1890 return new_rd_Shl(NULL, irg, block, op, k, mode);
1892 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1893 ir_node *op, ir_node *k, ir_mode *mode) {
1894 return new_rd_Shr(NULL, irg, block, op, k, mode);
1896 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1897 ir_node *op, ir_node *k, ir_mode *mode) {
1898 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1900 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1901 ir_node *op, ir_node *k, ir_mode *mode) {
1902 return new_rd_Rot(NULL, irg, block, op, k, mode);
1904 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1905 ir_node *op, ir_mode *mode) {
1906 return new_rd_Conv(NULL, irg, block, op, mode);
1908 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1909 return new_rd_Cast(NULL, irg, block, op, to_tp);
1911 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1912 ir_node **in, ir_mode *mode) {
1913 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1915 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1916 ir_node *store, ir_node *adr, ir_mode *mode) {
1917 return new_rd_Load(NULL, irg, block, store, adr, mode);
1919 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1920 ir_node *store, ir_node *adr, ir_node *val) {
1921 return new_rd_Store(NULL, irg, block, store, adr, val);
1923 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1924 ir_node *size, ir_type *alloc_type, where_alloc where) {
1925 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1927 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1928 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1929 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1931 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1932 return new_rd_Sync(NULL, irg, block, arity, in);
1934 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1935 ir_mode *mode, long proj) {
1936 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1938 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1940 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1942 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1943 int arity, ir_node **in) {
1944 return new_rd_Tuple(NULL, irg, block, arity, in );
1946 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1947 ir_node *val, ir_mode *mode) {
1948 return new_rd_Id(NULL, irg, block, val, mode);
1950 ir_node *new_r_Bad (ir_graph *irg) {
1951 return new_rd_Bad(irg);
1953 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1954 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1956 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1957 return new_rd_Unknown(irg, m);
1959 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1960 return new_rd_CallBegin(NULL, irg, block, callee);
1962 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1963 return new_rd_EndReg(NULL, irg, block);
1965 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1966 return new_rd_EndExcept(NULL, irg, block);
1968 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1969 return new_rd_Break(NULL, irg, block);
1971 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1972 ir_mode *mode, long proj) {
1973 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1975 ir_node *new_r_NoMem (ir_graph *irg) {
1976 return new_rd_NoMem(irg);
1978 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1979 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1980 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1982 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1983 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1984 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1986 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1988 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1990 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1991 ir_node *store, ir_node *obj) {
1992 return new_rd_Raise(NULL, irg, block, store, obj);
1994 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1995 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1996 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1999 /** ********************/
2000 /** public interfaces */
2001 /** construction tools */
2005 * - create a new Start node in the current block
2007 * @return s - pointer to the created Start node
/* Constructs a Start node (mode_T, zero predecessors) in the current block
   of current_ir_graph, then optimizes and verifies it. */
2012 new_d_Start (dbg_info *db)
2016 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
2017 op_Start, mode_T, 0, NULL);
2018 /* res->attr.start.irg = current_ir_graph; */
2020 res = optimize_node(res);
2021 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node (mode_X, dynamic in-array) in the current block
   of current_ir_graph, then optimizes and verifies it. */
2026 new_d_End (dbg_info *db)
2029 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
2030 op_End, mode_X, -1, NULL);
2031 res = optimize_node(res);
2032 IRN_VRFY_IRG(res, current_ir_graph);
2037 /* Constructs a Block with a fixed number of predecessors.
2038 Does set current_block. Can be used with automatic Phi
2039 node construction. */
/* Constructs a fully matured Block and makes it the current block.
   During phase_building it also allocates and zeroes graph_arr, the
   per-block array of local values used by the SSA/Phi construction. */
2041 new_d_Block (dbg_info *db, int arity, ir_node **in)
2045 int has_unknown = 0;
2047 res = new_bd_Block(db, arity, in);
2049 /* Create and initialize array for Phi-node construction. */
2050 if (get_irg_phase_state(current_ir_graph) == phase_building) {
2051 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2052 current_ir_graph->n_loc);
2053 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* A block with an Unknown predecessor must not be optimized away yet. */
2056 for (i = arity-1; i >= 0; i--)
2057 if (get_irn_op(in[i]) == op_Unknown) {
2062 if (!has_unknown) res = optimize_node(res);
2063 current_ir_graph->current_block = res;
2065 IRN_VRFY_IRG(res, current_ir_graph);
2070 /* ***********************************************************************/
2071 /* Methods necessary for automatic Phi node creation */
2073 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2074 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2075 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2076 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2078 Call Graph: ( A ---> B == A "calls" B)
2080 get_value mature_immBlock
2088 get_r_value_internal |
2092 new_rd_Phi0 new_rd_Phi_in
2094 * *************************************************************************** */
2096 /** Creates a Phi node with 0 predecessors.  Such a "Phi0" is a
     placeholder created in immature blocks; mature_immBlock later turns
     it into an Id pointing to the real Phi. */
2097 static INLINE ir_node *
2098 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2102 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2103 IRN_VRFY_IRG(res, irg);
2107 /* There are two implementations of the Phi node construction. The first
2108 is faster, but does not work for blocks with more than 2 predecessors.
2109 The second works always but is slower and causes more unnecessary Phi
2111 Select the implementations by the following preprocessor flag set in
2113 #if USE_FAST_PHI_CONSTRUCTION
2115 /* This is a stack used for allocating and deallocating nodes in
2116 new_rd_Phi_in. The original implementation used the obstack
2117 to model this stack, now it is explicit. This reduces side effects.
2119 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack. */
2121 new_Phi_in_stack(void) {
2124 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2126 res->stack = NEW_ARR_F (ir_node *, 0);
/* Frees the flexible array and the stack itself. */
2133 free_Phi_in_stack(Phi_in_stack *s) {
2134 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi back onto the graph's Phi_in_stack for reuse:
   append if the stack is full, else overwrite the slot at pos. */
2138 free_to_Phi_in_stack(ir_node *phi) {
2139 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2140 current_ir_graph->Phi_in_stack->pos)
2141 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2143 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2145 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node with the given arity: pops a recycled node from the
   stack if available, otherwise allocates a new one. */
2148 static INLINE ir_node *
2149 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2150 int arity, ir_node **in) {
2152 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2153 int pos = current_ir_graph->Phi_in_stack->pos;
2157 /* We need to allocate a new node */
/* NOTE(review): `db` is used here but is not among this function's visible
   parameters — looks like it should be NULL (or a dbg_info parameter is
   missing).  This #if path appears to be dead code; confirm before use. */
2158 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2159 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2161 /* reuse the old node and initialize it again. */
2164 assert (res->kind == k_ir_node);
2165 assert (res->op == op_Phi);
2169 assert (arity >= 0);
2170 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2171 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2173 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2175 (current_ir_graph->Phi_in_stack->pos)--;
2179 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2181 /* Creates a Phi node with a given, fixed array **in of predecessors.
2182 If the Phi node is unnecessary, as the same value reaches the block
2183 through all control flow paths, it is eliminated and the value
2184 returned directly. This constructor is only intended for use in
2185 the automatic Phi node generation triggered by get_value or mature.
2186 The implementation is quite tricky and depends on the fact, that
2187 the nodes are allocated on a stack:
2188 The in array contains predecessors and NULLs. The NULLs appear,
2189 if get_r_value_internal, that computed the predecessors, reached
2190 the same block on two paths. In this case the same value reaches
2191 this block on both paths, there is no definition in between. We need
2192 not allocate a Phi where these paths merge, but we have to communicate
2193 this fact to the caller. This happens by returning a pointer to the
2194 node the caller _will_ allocate. (Yes, we predict the address. We can
2195 do so because the nodes are allocated on the obstack.) The caller then
2196 finds a pointer to itself and, when this routine is called again,
2199 static INLINE ir_node *
2200 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2203 ir_node *res, *known;
2205 /* Allocate a new node on the obstack. This can return a node to
2206 which some of the pointers in the in-array already point.
2207 Attention: the constructor copies the in array, i.e., the later
2208 changes to the array in this routine do not affect the
2209 constructed node! If the in array contains NULLs, there will be
2210 missing predecessors in the returned node. Is this a possible
2211 internal state of the Phi node generation? */
2212 #if USE_EXPLICIT_PHI_IN_STACK
2213 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2215 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2216 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2219 /* The in-array can contain NULLs. These were returned by
2220 get_r_value_internal if it reached the same block/definition on a
2221 second path. The NULLs are replaced by the node itself to
2222 simplify the test in the next loop. */
2223 for (i = 0; i < ins; ++i) {
2228 /* This loop checks whether the Phi has more than one predecessor.
2229 If so, it is a real Phi node and we break the loop. Else the Phi
2230 node merges the same definition on several paths and therefore is
2232 for (i = 0; i < ins; ++i) {
2233 if (in[i] == res || in[i] == known)
2242 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2244 #if USE_EXPLICIT_PHI_IN_STACK
2245 free_to_Phi_in_stack(res);
/* Without the explicit stack the superfluous node is popped off the
   obstack again (it is the topmost allocation). */
2247 edges_node_deleted(res, current_ir_graph);
2248 obstack_free(current_ir_graph->obst, res);
2252 res = optimize_node (res);
2253 IRN_VRFY_IRG(res, irg);
2256 /* return the pointer to the Phi node. This node might be deallocated! */
2261 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2264 allocates and returns this node. The routine called to allocate the
2265 node might optimize it away and return a real value, or even a pointer
2266 to a deallocated Phi node on top of the obstack!
2267 This function is called with an in-array of proper size. **/
/* Collects the Phi operands from all predecessor blocks (via
   get_r_value_internal) into nin and builds the Phi through
   new_rd_Phi_in; stores the result in the block's graph_arr unless a
   value was set there in the meantime. */
2269 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2271 ir_node *prevBlock, *res;
2274 /* This loop goes to all predecessor blocks of the block the Phi node is in
2275 and there finds the operands of the Phi node by calling
2276 get_r_value_internal. */
2277 for (i = 1; i <= ins; ++i) {
2278 assert (block->in[i]);
2279 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2281 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2284 /* After collecting all predecessors into the array nin a new Phi node
2285 with these predecessors is created. This constructor contains an
2286 optimization: If all predecessors of the Phi node are identical it
2287 returns the only operand instead of a new Phi node. If the value
2288 passes two different control flow edges without being defined, and
2289 this is the second path treated, a pointer to the node that will be
2290 allocated for the first path (recursion) is returned. We already
2291 know the address of this node, as it is the next node to be allocated
2292 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2293 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2295 /* Now we know the value for "pos" and can enter it in the array with
2296 all known local variables. Attention: this might be a pointer to
2297 a node, that later will be allocated!!! See new_rd_Phi_in.
2298 If this is called in mature, after some set_value in the same block,
2299 the proper value must not be overwritten:
2301 get_value (makes Phi0, puts it into graph_arr)
2302 set_value (overwrites Phi0 in graph_arr)
2303 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2306 if (!block->attr.block.graph_arr[pos]) {
2307 block->attr.block.graph_arr[pos] = res;
2309 /* printf(" value already computed by %s\n",
2310 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2316 /* This function returns the last definition of a variable. In case
2317 this variable was last defined in a previous block, Phi nodes are
2318 inserted. If the part of the firm graph containing the definition
2319 is not yet constructed, a dummy Phi node is returned. */
/* Returns the last definition of variable `pos` with mode `mode` that is
   live at `block`, inserting Phi (or placeholder Phi0) nodes as needed.
   Returns NULL on a second visit of the same block (see case 4 below). */
2321 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2324 /* There are 4 cases to treat.
2326 1. The block is not mature and we visit it the first time. We can not
2327 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2328 predecessors is returned. This node is added to the linked list (field
2329 "link") of the containing block to be completed when this block is
2330 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2333 2. The value is already known in this block, graph_arr[pos] is set and we
2334 visit the block the first time. We can return the value without
2335 creating any new nodes.
2337 3. The block is mature and we visit it the first time. A Phi node needs
2338 to be created (phi_merge). If the Phi is not needed, as all its
2339 operands are the same value reaching the block through different
2340 paths, it's optimized away and the value itself is returned.
2342 4. The block is mature, and we visit it the second time. Now two
2343 subcases are possible:
2344 * The value was computed completely the last time we were here. This
2345 is the case if there is no loop. We can return the proper value.
2346 * The recursion that visited this node and set the flag did not
2347 return yet. We are computing a value in a loop and need to
2348 break the recursion without knowing the result yet.
2349 @@@ strange case. Straight forward we would create a Phi before
2350 starting the computation of its predecessors. In this case we will
2351 find a Phi here in any case. The problem is that this implementation
2352 only creates a Phi after computing the predecessors, so that it is
2353 hard to compute self references of this Phi. @@@
2354 There is no simple check for the second subcase. Therefore we check
2355 for a second visit and treat all such cases as the second subcase.
2356 Anyways, the basic situation is the same: we reached a block
2357 on two paths without finding a definition of the value: No Phi
2358 nodes are needed on both paths.
2359 We return this information "Two paths, no Phi needed" by a very tricky
2360 implementation that relies on the fact that an obstack is a stack and
2361 will return a node with the same address on different allocations.
2362 Look also at phi_merge and new_rd_phi_in to understand this.
2363 @@@ Unfortunately this does not work, see testprogram
2364 three_cfpred_example.
2368 /* case 4 -- already visited. */
2369 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2371 /* visited the first time */
2372 set_irn_visited(block, get_irg_visited(current_ir_graph));
2374 /* Get the local valid value */
2375 res = block->attr.block.graph_arr[pos];
2377 /* case 2 -- If the value is actually computed, return it. */
2378 if (res) return res;
2380 if (block->attr.block.matured) { /* case 3 */
2382 /* The Phi has the same amount of ins as the corresponding block. */
2383 int ins = get_irn_arity(block);
2385 NEW_ARR_A (ir_node *, nin, ins);
2387 /* Phi merge collects the predecessors and then creates a node. */
2388 res = phi_merge (block, pos, mode, nin, ins);
2390 } else { /* case 1 */
2391 /* The block is not mature, we don't know how many in's are needed. A Phi
2392 with zero predecessors is created. Such a Phi node is called Phi0
2393 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2394 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2396 The Phi0 has to remember the pos of its internal value. If the real
2397 Phi is computed, pos is used to update the array with the local
2400 res = new_rd_Phi0 (current_ir_graph, block, mode);
2401 res->attr.phi0_pos = pos;
2402 res->link = block->link;
2406 /* If we get here, the frontend missed a use-before-definition error */
2409 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2410 assert (mode->code >= irm_F && mode->code <= irm_P);
2411 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2412 tarval_mode_null[mode->code]);
2415 /* The local valid value is available now. */
2416 block->attr.block.graph_arr[pos] = res;
2424 it starts the recursion. This causes an Id at the entry of
2425 every block that has no definition of the value! **/
2427 #if USE_EXPLICIT_PHI_IN_STACK
2429 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2430 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Slow-path Phi constructor: builds a Phi from `in` (ignoring Bad
   predecessors and self references, including references to the Phi0
   placeholder `phi0` it replaces).  Collapses to the single known
   predecessor when no real Phi is needed. */
2433 static INLINE ir_node *
2434 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2435 ir_node **in, int ins, ir_node *phi0)
2438 ir_node *res, *known;
2440 /* Allocate a new node on the obstack. The allocation copies the in
2442 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2443 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2445 /* This loop checks whether the Phi has more than one predecessor.
2446 If so, it is a real Phi node and we break the loop. Else the
2447 Phi node merges the same definition on several paths and therefore
2448 is not needed. Don't consider Bad nodes! */
2450 for (i=0; i < ins; ++i)
2454 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2456 /* Optimize self referencing Phis: We can't detect them yet properly, as
2457 they still refer to the Phi0 they will replace. So replace right now. */
2458 if (phi0 && in[i] == phi0) in[i] = res;
2460 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2468 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Pop the superfluous Phi off the obstack (it is the topmost allocation). */
2471 edges_node_deleted(res, current_ir_graph);
2472 obstack_free (current_ir_graph->obst, res);
2473 if (is_Phi(known)) {
2474 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2475 order, an enclosing Phi node may become superfluous. */
2476 res = optimize_in_place_2(known);
2478 exchange(known, res);
2484 /* An undefined value, e.g., in unreachable code. */
2488 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2489 IRN_VRFY_IRG(res, irg);
2490 /* Memory Phis in endless loops must be kept alive.
2491 As we can't distinguish these easily we keep all of them alive. */
2492 if ((res->op == op_Phi) && (mode == mode_M))
2493 add_End_keepalive(irg->end, res);
2500 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2502 #if PRECISE_EXC_CONTEXT
2504 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2506 /* Construct a new frag_array for node n.
2507 Copy the content from the current graph_arr of the corresponding block:
2508 this is the current state.
2509 Set ProjM(n) as current memory state.
2510 Further the last entry in frag_arr of current block points to n. This
2511 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds the frag_arr for fragile op n: a snapshot of the current block's
   graph_arr whose slot 0 is replaced by the op's exception memory Proj.
   The last graph_arr slot is set to n, chaining the block's fragile ops. */
2513 static INLINE ir_node ** new_frag_arr (ir_node *n)
2518 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2519 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2520 sizeof(ir_node *)*current_ir_graph->n_loc);
2522 /* turn off optimization before allocating Proj nodes, as res isn't
/* NOTE(review): the matching set_optimize(opt) restore is expected further
   below in this function — confirm it is present. */
2524 opt = get_opt_optimize(); set_optimize(0);
2525 /* Here we rely on the fact that all frag ops have Memory as first result! */
2526 if (get_irn_op(n) == op_Call)
2527 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2528 else if (get_irn_op(n) == op_CopyB)
2529 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2530 else if (get_irn_op(n) == op_Bound)
2531 arr[0] = new_Proj(n, mode_M, pn_Bound_M_except);
/* All remaining fragile ops share one memory Proj number. */
2533 assert((pn_Quot_M == pn_DivMod_M) &&
2534 (pn_Quot_M == pn_Div_M) &&
2535 (pn_Quot_M == pn_Mod_M) &&
2536 (pn_Quot_M == pn_Load_M) &&
2537 (pn_Quot_M == pn_Store_M) &&
2538 (pn_Quot_M == pn_Alloc_M) );
2539 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2543 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2548 * returns the frag_arr from a node
/* Returns the frag_arr stored in node n's opcode-specific attribute;
   the dispatching case labels are per-opcode. */
2550 static INLINE ir_node **
2551 get_frag_arr (ir_node *n) {
2552 switch (get_irn_opcode(n)) {
2554 return n->attr.call.exc.frag_arr;
2556 return n->attr.a.exc.frag_arr;
2558 return n->attr.load.exc.frag_arr;
2560 return n->attr.store.exc.frag_arr;
2562 return n->attr.except.frag_arr;
/* Writes val into slot pos of frag_arr if still empty, then propagates
   the value along the chain of frag_arrs (linked via the last slot).
   NOTE(review): two variants appear here — a recursive one and a bounded
   loop (max 1000 hops) guarding against endless recursion; presumably one
   is conditionally compiled out — confirm against the full source. */
2567 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2569 if (!frag_arr[pos]) frag_arr[pos] = val;
2570 if (frag_arr[current_ir_graph->n_loc - 1]) {
2571 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2572 assert(arr != frag_arr && "Endless recursion detected");
2573 set_frag_value(arr, pos, val);
2578 for (i = 0; i < 1000; ++i) {
2579 if (!frag_arr[pos]) {
2580 frag_arr[pos] = val;
2582 if (frag_arr[current_ir_graph->n_loc - 1]) {
2583 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2589 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but reads the value as seen after the fragile
   op cfOp: consults cfOp's frag_arr first; builds a Phi/Phi0 if a later
   set_value in the block shadowed the value, and back-propagates the
   result into the frag_arr chain. */
2594 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2598 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2600 frag_arr = get_frag_arr(cfOp);
2601 res = frag_arr[pos];
2603 if (block->attr.block.graph_arr[pos]) {
2604 /* There was a set_value after the cfOp and no get_value before that
2605 set_value. We must build a Phi node now. */
2606 if (block->attr.block.matured) {
2607 int ins = get_irn_arity(block);
2609 NEW_ARR_A (ir_node *, nin, ins);
2610 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, as in
   get_r_value_internal. */
2612 res = new_rd_Phi0 (current_ir_graph, block, mode);
2613 res->attr.phi0_pos = pos;
2614 res->link = block->link;
2618 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2619 but this should be better: (remove comment if this works) */
2620 /* It's a Phi, we can write this into all graph_arrs with NULL */
2621 set_frag_value(block->attr.block.graph_arr, pos, res);
2623 res = get_r_value_internal(block, pos, mode);
2624 set_frag_value(block->attr.block.graph_arr, pos, res);
2629 #endif /* PRECISE_EXC_CONTEXT */
2632 computes the predecessors for the real phi node, and then
2633 allocates and returns this node. The routine called to allocate the
2634 node might optimize it away and return a real value.
2635 This function must be called with an in-array of proper size. **/
2637 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2639 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2642 /* If this block has no value at pos create a Phi0 and remember it
2643 in graph_arr to break recursions.
2644 Else we may not set graph_arr as there a later value is remembered. */
2646 if (!block->attr.block.graph_arr[pos]) {
2647 if (block == get_irg_start_block(current_ir_graph)) {
2648 /* Collapsing to Bad tarvals is no good idea.
2649 So we call a user-supplied routine here that deals with this case as
2650 appropriate for the given language. Sorrily the only help we can give
2651 here is the position.
2653 Even if all variables are defined before use, it can happen that
2654 we get to the start block, if a Cond has been replaced by a tuple
2655 (bad, jmp). In this case we call the function needlessly, eventually
2656 generating a non-existent error.
2657 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2660 if (default_initialize_local_variable) {
2661 ir_node *rem = get_cur_block();
2663 set_cur_block(block);
2664 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2668 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2669 /* We don't need to care about exception ops in the start block.
2670 There are none by definition. */
2671 return block->attr.block.graph_arr[pos];
2673 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2674 block->attr.block.graph_arr[pos] = phi0;
2675 #if PRECISE_EXC_CONTEXT
2676 if (get_opt_precise_exc_context()) {
2677 /* Set graph_arr for fragile ops. Also here we should break recursion.
2678 We could choose a cyclic path through an cfop. But the recursion would
2679 break at some point. */
2680 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2686 /* This loop goes to all predecessor blocks of the block the Phi node
2687 is in and there finds the operands of the Phi node by calling
2688 get_r_value_internal. */
2689 for (i = 1; i <= ins; ++i) {
2690 prevCfOp = skip_Proj(block->in[i]);
2692 if (is_Bad(prevCfOp)) {
2693 /* In case a Cond has been optimized we would get right to the start block
2694 with an invalid definition. */
2695 nin[i-1] = new_Bad();
2698 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2700 if (!is_Bad(prevBlock)) {
2701 #if PRECISE_EXC_CONTEXT
2702 if (get_opt_precise_exc_context() &&
2703 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2704 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2705 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2708 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2710 nin[i-1] = new_Bad();
2714 /* We want to pass the Phi0 node to the constructor: this finds additional
2715 optimization possibilities.
2716 The Phi0 node either is allocated in this function, or it comes from
2717 a former call to get_r_value_internal. In this case we may not yet
2718 exchange phi0, as this is done in mature_immBlock. */
2720 phi0_all = block->attr.block.graph_arr[pos];
2721 if (!((get_irn_op(phi0_all) == op_Phi) &&
2722 (get_irn_arity(phi0_all) == 0) &&
2723 (get_nodes_block(phi0_all) == block)))
2729 /* After collecting all predecessors into the array nin a new Phi node
2730 with these predecessors is created. This constructor contains an
2731 optimization: If all predecessors of the Phi node are identical it
2732 returns the only operand instead of a new Phi node. */
2733 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2735 /* In case we allocated a Phi0 node at the beginning of this procedure,
2736 we need to exchange this Phi0 with the real Phi. */
2738 exchange(phi0, res);
2739 block->attr.block.graph_arr[pos] = res;
2740 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2741 only an optimization. */
2747 /* This function returns the last definition of a variable. In case
2748 this variable was last defined in a previous block, Phi nodes are
2749 inserted. If the part of the firm graph containing the definition
2750 is not yet constructed, a dummy Phi node is returned. */
/* Returns the definition of value slot `pos` visible in `block`,
   inserting Phi/Phi0 nodes as needed; see the 4-case comment below.
   NOTE(review): sampled listing -- some original lines are missing. */
2752 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2755 /* There are 4 cases to treat.
2757 1. The block is not mature and we visit it the first time. We can not
2758 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2759 predecessors is returned. This node is added to the linked list (field
2760 "link") of the containing block to be completed when this block is
2761 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2764 2. The value is already known in this block, graph_arr[pos] is set and we
2765 visit the block the first time. We can return the value without
2766 creating any new nodes.
2768 3. The block is mature and we visit it the first time. A Phi node needs
2769 to be created (phi_merge). If the Phi is not needed, as all its
2770 operands are the same value reaching the block through different
2771 paths, it's optimized away and the value itself is returned.
2773 4. The block is mature, and we visit it the second time. Now two
2774 subcases are possible:
2775 * The value was computed completely the last time we were here. This
2776 is the case if there is no loop. We can return the proper value.
2777 * The recursion that visited this node and set the flag did not
2778 return yet. We are computing a value in a loop and need to
2779 break the recursion. This case only happens if we visited
2780 the same block with phi_merge before, which inserted a Phi0.
2781 So we return the Phi0.
2784 /* case 4 -- already visited. */
2785 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2786 /* As phi_merge allocates a Phi0 this value is always defined. Here
2787 is the critical difference of the two algorithms. */
2788 assert(block->attr.block.graph_arr[pos]);
2789 return block->attr.block.graph_arr[pos];
2792 /* visited the first time */
2793 set_irn_visited(block, get_irg_visited(current_ir_graph));
2795 /* Get the local valid value */
2796 res = block->attr.block.graph_arr[pos];
2798 /* case 2 -- If the value is actually computed, return it. */
2799 if (res) { return res; };
2801 if (block->attr.block.matured) { /* case 3 */
2803 /* The Phi has the same amount of ins as the corresponding block. */
2804 int ins = get_irn_arity(block);
2806 NEW_ARR_A (ir_node *, nin, ins);
2808 /* Phi merge collects the predecessors and then creates a node. */
2809 res = phi_merge (block, pos, mode, nin, ins);
2811 } else { /* case 1 */
2812 /* The block is not mature, we don't know how many in's are needed. A Phi
2813 with zero predecessors is created. Such a Phi node is called Phi0
2814 node. The Phi0 is then added to the list of Phi0 nodes in this block
2815 to be matured by mature_immBlock later.
2816 The Phi0 has to remember the pos of its internal value. If the real
2817 Phi is computed, pos is used to update the array with the local
2819 res = new_rd_Phi0 (current_ir_graph, block, mode);
2820 res->attr.phi0_pos = pos;
2821 res->link = block->link;
2825 /* If we get here, the frontend missed a use-before-definition error */
2828 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2829 assert (mode->code >= irm_F && mode->code <= irm_P);
2830 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2831 get_mode_null(mode));
2834 /* The local valid value is available now. */
2835 block->attr.block.graph_arr[pos] = res;
2840 #endif /* USE_FAST_PHI_CONSTRUCTION */
2842 /* ************************************************************************** */
2845 * Finalize a Block node, when all control flows are known.
2846 * Acceptable parameters are only Block nodes.
/* Finalizes an immature block once all control-flow predecessors are
   known: fixes the backedge array to the now-final arity, matures every
   pending Phi0 on the block's link chain via phi_merge, marks the block
   matured and optimizes it in place.
   NOTE(review): sampled listing -- some original lines are missing. */
2849 mature_immBlock (ir_node *block)
2855 assert (get_irn_opcode(block) == iro_Block);
2856 /* @@@ should be commented in
2857 assert (!get_Block_matured(block) && "Block already matured"); */
2859 if (!get_Block_matured(block)) {
/* -1: in[0] is not a CF predecessor (phi_merge iterates in[1..ins]). */
2860 ins = ARR_LEN (block->in)-1;
2861 /* Fix block parameters */
2862 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2864 /* An array for building the Phi nodes. */
2865 NEW_ARR_A (ir_node *, nin, ins);
2867 /* Traverse a chain of Phi nodes attached to this block and mature
2869 for (n = block->link; n; n=next) {
2870 inc_irg_visited(current_ir_graph);
2872 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2875 block->attr.block.matured = 1;
2877 /* Now, as the block is a finished firm node, we can optimize it.
2878 Since other nodes have been allocated since the block was created
2879 we can not free the node on the obstack. Therefore we have to call
2881 Unfortunately the optimization does not change a lot, as all allocated
2882 nodes refer to the unoptimized node.
2883 We can call _2, as global cse has no effect on blocks. */
2884 block = optimize_in_place_2(block);
2885 IRN_VRFY_IRG(block, current_ir_graph);
2890 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2892 return new_bd_Phi(db, current_ir_graph->current_block,
2897 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2899 return new_bd_Const(db, current_ir_graph->start_block,
2904 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2906 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2910 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2912 return new_bd_Const_type(db, current_ir_graph->start_block,
2918 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2920 return new_bd_Id(db, current_ir_graph->current_block,
2925 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2927 return new_bd_Proj(db, current_ir_graph->current_block,
2932 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2935 assert(arg->op == op_Cond);
2936 arg->attr.c.kind = fragmentary;
2937 arg->attr.c.default_proj = max_proj;
2938 res = new_Proj (arg, mode_X, max_proj);
2943 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2945 return new_bd_Conv(db, current_ir_graph->current_block,
2950 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2952 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2956 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2958 return new_bd_Tuple(db, current_ir_graph->current_block,
2963 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2965 return new_bd_Add(db, current_ir_graph->current_block,
2970 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2972 return new_bd_Sub(db, current_ir_graph->current_block,
2978 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2980 return new_bd_Minus(db, current_ir_graph->current_block,
2985 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2987 return new_bd_Mul(db, current_ir_graph->current_block,
2992 * allocate the frag array
/* Lazily attaches a "frag array" to a freshly constructed fragile node
   for PRECISE_EXC_CONTEXT bookkeeping.  Only done while the graph is
   still being built and when the constructor really produced a node of
   the expected op (it could have been optimized away or CSEd, in which
   case the array may already be set).
   NOTE(review): sampled listing -- closing braces are missing below. */
2994 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2995 if (get_opt_precise_exc_context()) {
2996 if ((current_ir_graph->phase_state == phase_building) &&
2997 (get_irn_op(res) == op) && /* Could be optimized away. */
2998 !*frag_store) /* Could be a cse where the arr is already set. */ {
2999 *frag_store = new_frag_arr(res);
3006 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3009 res = new_bd_Quot (db, current_ir_graph->current_block,
3011 res->attr.except.pin_state = op_pin_state_pinned;
3012 #if PRECISE_EXC_CONTEXT
3013 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
3020 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3023 res = new_bd_DivMod (db, current_ir_graph->current_block,
3025 res->attr.except.pin_state = op_pin_state_pinned;
3026 #if PRECISE_EXC_CONTEXT
3027 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
3034 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3037 res = new_bd_Div (db, current_ir_graph->current_block,
3039 res->attr.except.pin_state = op_pin_state_pinned;
3040 #if PRECISE_EXC_CONTEXT
3041 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
3048 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3051 res = new_bd_Mod (db, current_ir_graph->current_block,
3053 res->attr.except.pin_state = op_pin_state_pinned;
3054 #if PRECISE_EXC_CONTEXT
3055 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
3062 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3064 return new_bd_And (db, current_ir_graph->current_block,
3069 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3071 return new_bd_Or (db, current_ir_graph->current_block,
3076 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3078 return new_bd_Eor (db, current_ir_graph->current_block,
3083 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
3085 return new_bd_Not (db, current_ir_graph->current_block,
3090 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3092 return new_bd_Shl (db, current_ir_graph->current_block,
3097 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3099 return new_bd_Shr (db, current_ir_graph->current_block,
3104 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3106 return new_bd_Shrs (db, current_ir_graph->current_block,
3111 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3113 return new_bd_Rot (db, current_ir_graph->current_block,
3118 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3120 return new_bd_Abs (db, current_ir_graph->current_block,
3125 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3127 return new_bd_Cmp (db, current_ir_graph->current_block,
3132 new_d_Jmp (dbg_info *db)
3134 return new_bd_Jmp (db, current_ir_graph->current_block);
3138 new_d_IJmp (dbg_info *db, ir_node *tgt)
3140 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3144 new_d_Cond (dbg_info *db, ir_node *c)
3146 return new_bd_Cond (db, current_ir_graph->current_block, c);
3150 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3154 res = new_bd_Call (db, current_ir_graph->current_block,
3155 store, callee, arity, in, tp);
3156 #if PRECISE_EXC_CONTEXT
3157 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3164 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3166 return new_bd_Return (db, current_ir_graph->current_block,
3171 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3174 res = new_bd_Load (db, current_ir_graph->current_block,
3176 #if PRECISE_EXC_CONTEXT
3177 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3184 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3187 res = new_bd_Store (db, current_ir_graph->current_block,
3189 #if PRECISE_EXC_CONTEXT
3190 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3197 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
3201 res = new_bd_Alloc (db, current_ir_graph->current_block,
3202 store, size, alloc_type, where);
3203 #if PRECISE_EXC_CONTEXT
3204 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3211 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3212 ir_node *size, ir_type *free_type, where_alloc where)
3214 return new_bd_Free (db, current_ir_graph->current_block,
3215 store, ptr, size, free_type, where);
3219 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3220 /* GL: objptr was called frame before. Frame was a bad choice for the name
3221 as the operand could as well be a pointer to a dynamic object. */
3223 return new_bd_Sel (db, current_ir_graph->current_block,
3224 store, objptr, 0, NULL, ent);
3228 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3230 return new_bd_Sel (db, current_ir_graph->current_block,
3231 store, objptr, n_index, index, sel);
3235 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
3237 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3242 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3244 return new_bd_SymConst (db, current_ir_graph->start_block,
3249 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3251 return new_bd_Sync (db, current_ir_graph->current_block,
3258 return _new_d_Bad();
3262 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3264 return new_bd_Confirm (db, current_ir_graph->current_block,
3269 new_d_Unknown (ir_mode *m)
3271 return new_bd_Unknown(m);
3275 new_d_CallBegin (dbg_info *db, ir_node *call)
3278 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3283 new_d_EndReg (dbg_info *db)
3286 res = new_bd_EndReg(db, current_ir_graph->current_block);
3291 new_d_EndExcept (dbg_info *db)
3294 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3299 new_d_Break (dbg_info *db)
3301 return new_bd_Break (db, current_ir_graph->current_block);
3305 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3307 return new_bd_Filter (db, current_ir_graph->current_block,
3314 return _new_d_NoMem();
3318 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3319 ir_node *ir_true, ir_mode *mode) {
3320 return new_bd_Mux (db, current_ir_graph->current_block,
3321 sel, ir_false, ir_true, mode);
3324 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
3325 ir_node *dst, ir_node *src, ir_type *data_type) {
3327 res = new_bd_CopyB(db, current_ir_graph->current_block,
3328 store, dst, src, data_type);
3329 #if PRECISE_EXC_CONTEXT
3330 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
3336 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
3338 return new_bd_InstOf (db, current_ir_graph->current_block,
3339 store, objptr, type);
3343 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3345 return new_bd_Raise (db, current_ir_graph->current_block,
3349 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
3350 ir_node *idx, ir_node *lower, ir_node *upper) {
3352 res = new_bd_Bound(db, current_ir_graph->current_block,
3353 store, idx, lower, upper);
3354 #if PRECISE_EXC_CONTEXT
3355 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
3360 /* ********************************************************************* */
3361 /* Comfortable interface with automatic Phi node construction. */
3362 /* (Also uses constructors of the ?? interface, except new_Block.) */
3363 /* ********************************************************************* */
3365 /* Block construction */
3366 /* immature Block without predecessors */
/* Creates an immature block (predecessors unknown yet, dynamic in-array)
   and makes it the current block.  graph_arr gets one zeroed slot per
   local value (n_loc) for automatic Phi-node construction; NULL means
   "no definition known yet" in this block.
   NOTE(review): sampled listing -- the trailing `return res; }` lines
   are missing from this excerpt. */
3367 ir_node *new_d_immBlock (dbg_info *db) {
3370 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3371 /* creates a new dynamic in-array as length of in is -1 */
3372 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3373 current_ir_graph->current_block = res;
3374 res->attr.block.matured = 0;
3375 res->attr.block.dead = 0;
3376 /* res->attr.block.exc = exc_normal; */
3377 /* res->attr.block.handler_entry = 0; */
3378 res->attr.block.irg = current_ir_graph;
3379 res->attr.block.backedge = NULL;
3380 res->attr.block.in_cg = NULL;
3381 res->attr.block.cg_backedge = NULL;
3382 set_Block_block_visited(res, 0);
3384 /* Create and initialize array for Phi-node construction. */
3385 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3386 current_ir_graph->n_loc);
3387 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3389 /* Immature block may not be optimized! */
3390 IRN_VRFY_IRG(res, current_ir_graph);
3396 new_immBlock (void) {
3397 return new_d_immBlock(NULL);
3400 /* add an edge to a jmp/control flow node */
/* Appends `jmp` as one more control-flow predecessor of the (still
   immature) block; mature_immBlock later fixes the Phi arities.
   Adding to an already matured block is a hard error.
   NOTE(review): sampled listing -- closing braces are missing below. */
3402 add_immBlock_pred (ir_node *block, ir_node *jmp)
3404 if (block->attr.block.matured) {
3405 assert(0 && "Error: Block already matured!\n");
3408 assert(jmp != NULL);
3409 ARR_APP1(ir_node *, block->in, jmp);
3413 /* changing the current block */
3415 set_cur_block (ir_node *target) {
3416 current_ir_graph->current_block = target;
3419 /* ************************ */
3420 /* parameter administration */
3422 /* get a value from the parameter array from the current block by its index */
/* Looks up the current definition of local value `pos` in the current
   block, building Phi nodes on demand (get_r_value_internal).  pos is
   shifted by one because graph_arr slot 0 holds the store value (see
   get_store/set_store below).  Only valid while phase_building. */
3424 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3426 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3427 inc_irg_visited(current_ir_graph);
3429 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3431 /* get a value from the parameter array from the current block by its index */
3433 get_value (int pos, ir_mode *mode)
3435 return get_d_value(NULL, pos, mode);
3438 /* set a value at position pos in the parameter array from the current block */
/* Records `value` as the current definition of local slot `pos` in the
   current block.  Slot 0 is reserved for the store, hence pos + 1, and
   the shifted index must stay within the graph's n_loc slots. */
3440 set_value (int pos, ir_node *value)
3442 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3443 assert(pos+1 < current_ir_graph->n_loc);
3444 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* Linear scan of the current block's value array for `value`; starts at
   slot 1 because slot 0 holds the store.  Presumably returns the slot
   index on a hit -- the return statements are missing from this sampled
   listing, so confirm against the full source. */
3448 find_value(ir_node *value)
3451 ir_node *bl = current_ir_graph->current_block;
3453 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3454 if (bl->attr.block.graph_arr[i] == value)
3459 /* get the current store */
3463 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3464 /* GL: one could call get_value instead */
3465 inc_irg_visited(current_ir_graph);
3466 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3469 /* set the current store */
3471 set_store (ir_node *store)
3473 /* GL: one could call set_value instead */
3474 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3475 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3479 keep_alive (ir_node *ka) {
3480 add_End_keepalive(current_ir_graph->end, ka);
3483 /* --- Useful access routines --- */
3484 /* Returns the current block of the current graph. To set the current
3485 block use set_cur_block. */
3486 ir_node *get_cur_block(void) {
3487 return get_irg_current_block(current_ir_graph);
3490 /* Returns the frame type of the current graph */
3491 ir_type *get_cur_frame_type(void) {
3492 return get_irg_frame_type(current_ir_graph);
3496 /* ********************************************************************* */
3499 /* call once for each run of the library */
/* Library-level, once-per-run setup: installs the frontend callback that
   phi_merge uses to materialize a value on use-before-definition. */
3501 init_cons(uninitialized_local_variable_func_t *func)
3503 default_initialize_local_variable = func;
3506 /* call for each graph */
/* Per-graph finalization: construction is done, so leave phase_building;
   the phase_building asserts then reject further automatic construction. */
3508 irg_finalize_cons (ir_graph *irg) {
3509 irg->phase_state = phase_high;
3513 irp_finalize_cons (void) {
3514 int i, n_irgs = get_irp_n_irgs();
3515 for (i = 0; i < n_irgs; i++) {
3516 irg_finalize_cons(get_irp_irg(i));
3518 irp->phase_state = phase_high;\
3524 ir_node *new_Block(int arity, ir_node **in) {
3525 return new_d_Block(NULL, arity, in);
3527 ir_node *new_Start (void) {
3528 return new_d_Start(NULL);
3530 ir_node *new_End (void) {
3531 return new_d_End(NULL);
3533 ir_node *new_Jmp (void) {
3534 return new_d_Jmp(NULL);
3536 ir_node *new_IJmp (ir_node *tgt) {
3537 return new_d_IJmp(NULL, tgt);
3539 ir_node *new_Cond (ir_node *c) {
3540 return new_d_Cond(NULL, c);
3542 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3543 return new_d_Return(NULL, store, arity, in);
3545 ir_node *new_Const (ir_mode *mode, tarval *con) {
3546 return new_d_Const(NULL, mode, con);
3549 ir_node *new_Const_long(ir_mode *mode, long value)
3551 return new_d_Const_long(NULL, mode, value);
3554 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3555 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3558 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3559 return new_d_SymConst(NULL, value, kind);
3561 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3562 return new_d_simpleSel(NULL, store, objptr, ent);
3564 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3566 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3568 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3570 return new_d_Call(NULL, store, callee, arity, in, tp);
3572 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3573 return new_d_Add(NULL, op1, op2, mode);
3575 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3576 return new_d_Sub(NULL, op1, op2, mode);
3578 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3579 return new_d_Minus(NULL, op, mode);
3581 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3582 return new_d_Mul(NULL, op1, op2, mode);
3584 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3585 return new_d_Quot(NULL, memop, op1, op2);
3587 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3588 return new_d_DivMod(NULL, memop, op1, op2);
3590 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3591 return new_d_Div(NULL, memop, op1, op2);
3593 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3594 return new_d_Mod(NULL, memop, op1, op2);
3596 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3597 return new_d_Abs(NULL, op, mode);
3599 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3600 return new_d_And(NULL, op1, op2, mode);
3602 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3603 return new_d_Or(NULL, op1, op2, mode);
3605 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3606 return new_d_Eor(NULL, op1, op2, mode);
3608 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3609 return new_d_Not(NULL, op, mode);
3611 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3612 return new_d_Shl(NULL, op, k, mode);
3614 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3615 return new_d_Shr(NULL, op, k, mode);
3617 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3618 return new_d_Shrs(NULL, op, k, mode);
3620 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3621 return new_d_Rot(NULL, op, k, mode);
3623 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3624 return new_d_Cmp(NULL, op1, op2);
3626 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3627 return new_d_Conv(NULL, op, mode);
3629 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3630 return new_d_Cast(NULL, op, to_tp);
3632 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3633 return new_d_Phi(NULL, arity, in, mode);
3635 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3636 return new_d_Load(NULL, store, addr, mode);
3638 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3639 return new_d_Store(NULL, store, addr, val);
3641 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3642 where_alloc where) {
3643 return new_d_Alloc(NULL, store, size, alloc_type, where);
3645 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3646 ir_type *free_type, where_alloc where) {
3647 return new_d_Free(NULL, store, ptr, size, free_type, where);
3649 ir_node *new_Sync (int arity, ir_node **in) {
3650 return new_d_Sync(NULL, arity, in);
3652 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3653 return new_d_Proj(NULL, arg, mode, proj);
3655 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3656 return new_d_defaultProj(NULL, arg, max_proj);
3658 ir_node *new_Tuple (int arity, ir_node **in) {
3659 return new_d_Tuple(NULL, arity, in);
3661 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3662 return new_d_Id(NULL, val, mode);
3664 ir_node *new_Bad (void) {
3667 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3668 return new_d_Confirm (NULL, val, bound, cmp);
3670 ir_node *new_Unknown(ir_mode *m) {
3671 return new_d_Unknown(m);
3673 ir_node *new_CallBegin (ir_node *callee) {
3674 return new_d_CallBegin(NULL, callee);
3676 ir_node *new_EndReg (void) {
3677 return new_d_EndReg(NULL);
3679 ir_node *new_EndExcept (void) {
3680 return new_d_EndExcept(NULL);
3682 ir_node *new_Break (void) {
3683 return new_d_Break(NULL);
3685 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3686 return new_d_Filter(NULL, arg, mode, proj);
3688 ir_node *new_NoMem (void) {
3689 return new_d_NoMem();
3691 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3692 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3694 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3695 return new_d_CopyB(NULL, store, dst, src, data_type);
3697 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3698 return new_d_InstOf (NULL, store, objptr, ent);
3700 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3701 return new_d_Raise(NULL, store, obj);
3703 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3704 return new_d_Bound(NULL, store, idx, lower, upper);