3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45    Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
/* Two variants of IRN_VRFY_IRG: the first expands to nothing (no-op for
   non-verifying builds), the second routes to irn_vrfy_irg().  The
   surrounding #if/#else selecting between them is not visible in this
   chunk -- presumably keyed on a debug/verify flag; confirm in full file. */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
61  * Language dependent variable initialization callback.
/* Callback used to initialize otherwise-uninitialized local variables;
   NULL until a frontend installs its own handler. */
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67    Does not set current_block.  Can not be used with automatic
68    Phi node construction. */
/* Block-local (bd) constructor: allocates the Block node in
   current_ir_graph, marks it mature, and initializes all block
   attributes (backedge array, inter-procedural fields cleared). */
70 new_bd_Block (dbg_info *db,  int arity,  ir_node **in)
73   ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block: the block argument of new_ir_node is NULL. */
75   res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
76   set_Block_matured(res, 1);
77   set_Block_block_visited(res, 0);
79   /* res->attr.block.exc = exc_normal; */
80   /* res->attr.block.handler_entry = 0; */
81   res->attr.block.dead        = 0;
82   res->attr.block.irg         = irg;
/* Backedge flags sized to the predecessor count; allocated on the graph obstack. */
83   res->attr.block.backedge    = new_backedge_arr(irg->obst, arity);
84   res->attr.block.in_cg       = NULL;
85   res->attr.block.cg_backedge = NULL;
86   res->attr.block.extblk      = NULL;
88   IRN_VRFY_IRG(res, irg);
/* Start node: mode_T tuple with no predecessors, placed in the given block. */
93 new_bd_Start (dbg_info *db, ir_node *block)
96   ir_graph *irg = current_ir_graph;
98   res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99   /* res->attr.start.irg = irg; */
101   IRN_VRFY_IRG(res, irg);
/* End node: created with dynamic arity (-1) so keep-alive edges can be
   added later (see add_End_keepalive use in new_bd_Phi). */
106 new_bd_End (dbg_info *db, ir_node *block)
109   ir_graph *irg = current_ir_graph;
111   res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113   IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors.  Calling this constructor
118    is only allowed if the corresponding block is mature.  */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123   ir_graph *irg = current_ir_graph;
125   bool has_unknown = false;
127   /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature its arity must match the Phi's arity. */
129   if ( get_Block_matured(block) )
130     assert( get_irn_arity(block) == arity );
132   res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134   res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; such Phis are not optimized below. */
136   for (i = arity-1; i >= 0; i--)
137     if (get_irn_op(in[i]) == op_Unknown) {
142   if (!has_unknown) res = optimize_node (res);
143   IRN_VRFY_IRG(res, irg);
145   /* Memory Phis in endless loops must be kept alive.
146      As we can't distinguish these easily we keep all of them alive. */
147   if ((res->op == op_Phi) && (mode == mode_M))
148     add_End_keepalive(irg->end, res);
/* Const with explicit type.  NOTE: the 'block' parameter is ignored --
   the node is always placed in the graph's start block (see the
   irg->start_block argument below). */
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156   ir_graph *irg = current_ir_graph;
158   res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159   res->attr.con.tv = con;
160   set_Const_type(res, tp);  /* Call method because of complex assertion. */
161   res = optimize_node (res);
/* optimize_node may CSE to an existing Const; it must carry the same type. */
162   assert(get_Const_type(res) == tp);
163   IRN_VRFY_IRG(res, irg);
/* Convenience wrapper: Const with the unknown type.  Delegates to the
   rd variant (passing current_ir_graph explicitly). */
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171   ir_graph *irg = current_ir_graph;
173   return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const from a host 'long', converted via tarval. */
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179   ir_graph *irg = current_ir_graph;
181   return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Id node: single-input identity; usually folded away by optimize_node. */
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188   ir_graph *irg = current_ir_graph;
190   res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191   res = optimize_node(res);
192   IRN_VRFY_IRG(res, irg);
/* Proj node: selects result number 'proj' from a mode_T predecessor. */
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201   ir_graph *irg = current_ir_graph;
203   res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204   res->attr.proj = proj;
207   assert(get_Proj_pred(res));
208   assert(get_nodes_block(get_Proj_pred(res)));
210   res = optimize_node(res);
212   IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: mutates the Cond's attributes (marks it
   fragmentary and records max_proj) before building an X-mode Proj. */
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222   ir_graph *irg = current_ir_graph;
224   assert(arg->op == op_Cond);
225   arg->attr.c.kind = fragmentary;
226   arg->attr.c.default_proj = max_proj;
227   res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv node: converts 'op' to the target mode. */
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235   ir_graph *irg = current_ir_graph;
237   res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238   res = optimize_node(res);
239   IRN_VRFY_IRG(res, irg);
/* Cast node: reinterprets 'op' as type to_tp; the mode stays that of op. */
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, type *to_tp)
247   ir_graph *irg = current_ir_graph;
249   assert(is_atomic_type(to_tp));
251   res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252   res->attr.cast.totype = to_tp;
253   res = optimize_node(res);
254   IRN_VRFY_IRG(res, irg);
/* Tuple node: groups 'arity' values into one mode_T node. */
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262   ir_graph *irg = current_ir_graph;
264   res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265   res = optimize_node (res);
266   IRN_VRFY_IRG(res, irg);
/* Arithmetic constructors.  All follow the same pattern: build the node
   in current_ir_graph, run the local optimizer (constant folding / CSE),
   verify.  The division-family nodes (Quot, DivMod, Div, Mod) take a
   memory operand and produce a mode_T tuple (memory + result(s) +
   exception control flow). */
271 new_bd_Add (dbg_info *db, ir_node *block,
272        ir_node *op1, ir_node *op2, ir_mode *mode)
276   ir_graph *irg = current_ir_graph;
280   res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281   res = optimize_node(res);
282   IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288        ir_node *op1, ir_node *op2, ir_mode *mode)
292   ir_graph *irg = current_ir_graph;
296   res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297   res = optimize_node (res);
298   IRN_VRFY_IRG(res, irg);
304 new_bd_Minus (dbg_info *db, ir_node *block,
305               ir_node *op,  ir_mode *mode)
308   ir_graph *irg = current_ir_graph;
310   res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
311   res = optimize_node(res);
312   IRN_VRFY_IRG(res, irg);
317 new_bd_Mul (dbg_info *db, ir_node *block,
318        ir_node *op1, ir_node *op2, ir_mode *mode)
322   ir_graph *irg = current_ir_graph;
326   res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
327   res = optimize_node(res);
328   IRN_VRFY_IRG(res, irg);
333 new_bd_Quot (dbg_info *db, ir_node *block,
334             ir_node *memop, ir_node *op1, ir_node *op2)
338   ir_graph *irg = current_ir_graph;
343   res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
344   res = optimize_node(res);
345   IRN_VRFY_IRG(res, irg);
350 new_bd_DivMod (dbg_info *db, ir_node *block,
351           ir_node *memop, ir_node *op1, ir_node *op2)
355   ir_graph *irg = current_ir_graph;
360   res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
361   res = optimize_node(res);
362   IRN_VRFY_IRG(res, irg);
367 new_bd_Div (dbg_info *db, ir_node *block,
368            ir_node *memop, ir_node *op1, ir_node *op2)
372   ir_graph *irg = current_ir_graph;
377   res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
378   res = optimize_node(res);
379   IRN_VRFY_IRG(res, irg);
384 new_bd_Mod (dbg_info *db, ir_node *block,
385            ir_node *memop, ir_node *op1, ir_node *op2)
389   ir_graph *irg = current_ir_graph;
394   res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
395   res = optimize_node(res);
396   IRN_VRFY_IRG(res, irg);
/* Bitwise, shift, and comparison constructors -- same pattern as the
   arithmetic constructors above: build, optimize, verify.  Cmp produces
   a mode_T node whose Projs carry the individual relations. */
401 new_bd_And (dbg_info *db, ir_node *block,
402            ir_node *op1, ir_node *op2, ir_mode *mode)
406   ir_graph *irg = current_ir_graph;
410   res = new_ir_node(db, irg, block, op_And, mode, 2, in);
411   res = optimize_node(res);
412   IRN_VRFY_IRG(res, irg);
417 new_bd_Or (dbg_info *db, ir_node *block,
418           ir_node *op1, ir_node *op2, ir_mode *mode)
422   ir_graph *irg = current_ir_graph;
426   res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
427   res = optimize_node(res);
428   IRN_VRFY_IRG(res, irg);
433 new_bd_Eor (dbg_info *db, ir_node *block,
434            ir_node *op1, ir_node *op2, ir_mode *mode)
438   ir_graph *irg = current_ir_graph;
442   res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
443   res = optimize_node (res);
444   IRN_VRFY_IRG(res, irg);
449 new_bd_Not    (dbg_info *db, ir_node *block,
450           ir_node *op, ir_mode *mode)
453   ir_graph *irg = current_ir_graph;
455   res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
456   res = optimize_node(res);
457   IRN_VRFY_IRG(res, irg);
462 new_bd_Shl (dbg_info *db, ir_node *block,
463           ir_node *op, ir_node *k, ir_mode *mode)
467   ir_graph *irg = current_ir_graph;
471   res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
472   res = optimize_node(res);
473   IRN_VRFY_IRG(res, irg);
478 new_bd_Shr (dbg_info *db, ir_node *block,
479        ir_node *op, ir_node *k, ir_mode *mode)
483   ir_graph *irg = current_ir_graph;
487   res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
488   res = optimize_node(res);
489   IRN_VRFY_IRG(res, irg);
494 new_bd_Shrs (dbg_info *db, ir_node *block,
495        ir_node *op, ir_node *k, ir_mode *mode)
499   ir_graph *irg = current_ir_graph;
503   res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
504   res = optimize_node(res);
505   IRN_VRFY_IRG(res, irg);
510 new_bd_Rot (dbg_info *db, ir_node *block,
511        ir_node *op, ir_node *k, ir_mode *mode)
515   ir_graph *irg = current_ir_graph;
519   res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
520   res = optimize_node(res);
521   IRN_VRFY_IRG(res, irg);
526 new_bd_Abs (dbg_info *db, ir_node *block,
527        ir_node *op, ir_mode *mode)
530   ir_graph *irg = current_ir_graph;
532   res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
533   res = optimize_node (res);
534   IRN_VRFY_IRG(res, irg);
539 new_bd_Cmp (dbg_info *db, ir_node *block,
540        ir_node *op1, ir_node *op2)
544   ir_graph *irg = current_ir_graph;
549   res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
550   res = optimize_node(res);
551   IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow, no data inputs. */
556 new_bd_Jmp (dbg_info *db, ir_node *block)
559   ir_graph *irg = current_ir_graph;
561   res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
562   res = optimize_node (res);
563   IRN_VRFY_IRG (res, irg);
/* IJmp: computed (indirect) jump to target address 'tgt'. */
568 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
571   ir_graph *irg = current_ir_graph;
573   res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
574   res = optimize_node (res);
575   IRN_VRFY_IRG (res, irg);
/* optimize_node may have folded the IJmp into something else. */
577   if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* Cond: branches on 'c'; starts with a dense projection numbering and
   no jump prediction. */
583 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
586   ir_graph *irg = current_ir_graph;
588   res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
589   res->attr.c.kind         = dense;
590   res->attr.c.default_proj = 0;
591   res->attr.c.pred         = COND_JMP_PRED_NONE;
592   res = optimize_node (res);
593   IRN_VRFY_IRG(res, irg);
/* Call node: predecessors are [store, callee, arg0..argN-1]; the real
   in-array is assembled on the stack via NEW_ARR_A.  Type must be a
   method type or the unknown type. */
598 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
599             ir_node *callee, int arity, ir_node **in, type *tp)
604   ir_graph *irg = current_ir_graph;
607   NEW_ARR_A(ir_node *, r_in, r_arity);
/* Arguments start at slot 2, after store and callee. */
610   memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
612   res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
614   assert((get_unknown_type() == tp) || is_Method_type(tp));
615   set_Call_type(res, tp);
616   res->attr.call.exc.pin_state = op_pin_state_pinned;
617   res->attr.call.callee_arr    = NULL;
618   res = optimize_node(res);
619   IRN_VRFY_IRG(res, irg);
/* Return node: predecessors are [store, res0..resN-1]. */
624 new_bd_Return (dbg_info *db, ir_node *block,
625               ir_node *store, int arity, ir_node **in)
630   ir_graph *irg = current_ir_graph;
633   NEW_ARR_A (ir_node *, r_in, r_arity);
/* Return values start at slot 1, after the store. */
635   memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
636   res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
637   res = optimize_node(res);
638   IRN_VRFY_IRG(res, irg);
/* Raise node: throws exception object 'obj'; inputs [store, obj]. */
643 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
647   ir_graph *irg = current_ir_graph;
651   res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
652   res = optimize_node(res);
653   IRN_VRFY_IRG(res, irg);
/* Load node: inputs [store, adr]; records the loaded mode and defaults
   to non-volatile, pinned (may throw). */
658 new_bd_Load (dbg_info *db, ir_node *block,
659        ir_node *store, ir_node *adr, ir_mode *mode)
663   ir_graph *irg = current_ir_graph;
667   res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
668   res->attr.load.exc.pin_state = op_pin_state_pinned;
669   res->attr.load.load_mode     = mode;
670   res->attr.load.volatility    = volatility_non_volatile;
671   res = optimize_node(res);
672   IRN_VRFY_IRG(res, irg);
/* Store node: inputs [store, adr, val]; non-volatile, pinned. */
677 new_bd_Store (dbg_info *db, ir_node *block,
678        ir_node *store, ir_node *adr, ir_node *val)
682   ir_graph *irg = current_ir_graph;
687   res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
688   res->attr.store.exc.pin_state = op_pin_state_pinned;
689   res->attr.store.volatility    = volatility_non_volatile;
690   res = optimize_node(res);
691   IRN_VRFY_IRG(res, irg);
/* Alloc node: allocates 'size' bytes of alloc_type on heap or stack
   ('where'); inputs [store, size]. */
696 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
697        ir_node *size, type *alloc_type, where_alloc where)
701   ir_graph *irg = current_ir_graph;
705   res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
706   res->attr.a.exc.pin_state = op_pin_state_pinned;
707   res->attr.a.where         = where;
708   res->attr.a.type          = alloc_type;
709   res = optimize_node(res);
710   IRN_VRFY_IRG(res, irg);
/* Free node: releases 'ptr' of free_type; inputs [store, ptr, size];
   result is the new memory state (mode_M). */
715 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
716        ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
720   ir_graph *irg = current_ir_graph;
725   res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
726   res->attr.f.where = where;
727   res->attr.f.type  = free_type;
728   res = optimize_node(res);
729   IRN_VRFY_IRG(res, irg);
/* Sel node: selects entity 'ent' relative to objptr; inputs are
   [store, objptr, index0..indexN-1]. */
734 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
735            int arity, ir_node **in, entity *ent)
740   ir_graph *irg = current_ir_graph;
742   assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
745   NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
/* Index operands start at slot 2, after store and objptr. */
748   memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
750    * FIXM: Sel's can select functions which should be of mode mode_P_code.
752   res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
753   res->attr.s.ent = ent;
754   res = optimize_node(res);
755   IRN_VRFY_IRG(res, irg);
/* InstOf node (instance-of test).  NOTE(review): the node is built with
   op_Sel, not a dedicated op_InstOf -- looks intentional for this
   version (io attributes are set), but confirm against the op table.
   Also note it is deliberately NOT optimized (see commented call). */
760 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
761        ir_node *objptr, type *ent)
766   ir_graph *irg = current_ir_graph;
769   NEW_ARR_A(ir_node *, r_in, r_arity);
773   res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
774   res->attr.io.ent = ent;
776   /* res = optimize(res); */
777   IRN_VRFY_IRG(res, irg);
/* SymConst with explicit type: address-kind SymConsts get pointer mode;
   the symbol union and kind are stored in the node attributes. */
782 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
783                      symconst_kind symkind, type *tp) {
786   ir_graph *irg = current_ir_graph;
788   if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
789     mode = mode_P_data;   /* FIXME: can be mode_P_code */
793   res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
795   res->attr.i.num = symkind;
796   res->attr.i.sym = value;
799   res = optimize_node(res);
800   IRN_VRFY_IRG(res, irg);
/* SymConst with the unknown type; delegates to the rd variant. */
805 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
806                 symconst_kind symkind)
808   ir_graph *irg = current_ir_graph;
810   ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Sync node: joins 'arity' memory values into one memory state. */
815 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
818   ir_graph *irg = current_ir_graph;
820   res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
821   res = optimize_node(res);
822   IRN_VRFY_IRG(res, irg);
/* Confirm node: asserts that 'val' relates to 'bound' by comparison
   'cmp'; carries val's mode. */
827 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
829   ir_node *in[2], *res;
830   ir_graph *irg = current_ir_graph;
834   res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
835   res->attr.confirm_cmp = cmp;
836   res = optimize_node (res);
837   IRN_VRFY_IRG(res, irg);
841 /* this function is often called with current_ir_graph unset */
/* Unknown node of mode m; always placed in the start block.  Note this
   still reads current_ir_graph despite the warning above. */
843 new_bd_Unknown (ir_mode *m)
846   ir_graph *irg = current_ir_graph;
848   res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
849   res = optimize_node(res);
/* Inter-procedural view constructors (CallBegin/EndReg/EndExcept/Break). */
/* CallBegin: marks the start of an inter-procedural call; its single
   input is the call's function pointer, and it remembers the Call. */
854 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
858   ir_graph *irg = current_ir_graph;
860   in[0] = get_Call_ptr(call);
861   res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
862   /* res->attr.callbegin.irg = irg; */
863   res->attr.callbegin.call = call;
864   res = optimize_node(res);
865   IRN_VRFY_IRG(res, irg);
/* EndReg: regular end of an inter-procedural graph; dynamic arity. */
870 new_bd_EndReg (dbg_info *db, ir_node *block)
873   ir_graph *irg = current_ir_graph;
875   res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
877   IRN_VRFY_IRG(res, irg);
/* EndExcept: exceptional end; also recorded in irg->end_except. */
882 new_bd_EndExcept (dbg_info *db, ir_node *block)
885   ir_graph *irg = current_ir_graph;
887   res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
888   irg->end_except = res;
889   IRN_VRFY_IRG (res, irg);
/* Break: control-flow exit used in the inter-procedural view. */
894 new_bd_Break (dbg_info *db, ir_node *block)
897   ir_graph *irg = current_ir_graph;
899   res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
900   res = optimize_node(res);
901   IRN_VRFY_IRG(res, irg);
/* Filter: inter-procedural analogue of Proj; selects result 'proj'
   from 'arg' and carries inter-procedural in/backedge arrays. */
906 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
910   ir_graph *irg = current_ir_graph;
912   res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
913   res->attr.filter.proj = proj;
914   res->attr.filter.in_cg = NULL;
915   res->attr.filter.backedge = NULL;
918   assert(get_Proj_pred(res));
919   assert(get_nodes_block(get_Proj_pred(res)));
921   res = optimize_node(res);
922   IRN_VRFY_IRG(res, irg);
/* Mux: selects ir_true or ir_false depending on 'sel'. */
927 new_bd_Mux  (dbg_info *db, ir_node *block,
928     ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
932   ir_graph *irg = current_ir_graph;
938   res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
941   res = optimize_node(res);
942   IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of data_type from src to dst; inputs
   [store, dst, src]; pinned (may throw). */
947 new_bd_CopyB  (dbg_info *db, ir_node *block,
948     ir_node *store, ir_node *dst, ir_node *src, type *data_type)
952   ir_graph *irg = current_ir_graph;
958   res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
960   res->attr.copyb.exc.pin_state = op_pin_state_pinned;
961   res->attr.copyb.data_type     = data_type;
962   res = optimize_node(res);
963   IRN_VRFY_IRG(res, irg);
967 /* --------------------------------------------- */
968 /* private interfaces, for professional use only */
969 /* --------------------------------------------- */
971 /* Constructs a Block with a fixed number of predecessors.
972    Does not set current_block.  Can not be used with automatic
973    Phi node construction. */
/* rd-variant wrappers: each saves current_ir_graph, switches it to the
   explicit 'irg' argument, delegates to the bd constructor, and restores
   the previous graph. */
975 new_rd_Block (dbg_info *db, ir_graph *irg,  int arity, ir_node **in)
977   ir_graph *rem = current_ir_graph;
980   current_ir_graph = irg;
981   res = new_bd_Block (db, arity, in);
982   current_ir_graph = rem;
988 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
990   ir_graph *rem = current_ir_graph;
993   current_ir_graph = irg;
994   res = new_bd_Start (db, block);
995   current_ir_graph = rem;
/* End node constructor for an explicit graph.
   BUGFIX: the pre-call assignment set current_ir_graph = rem (a no-op,
   leaving the previous graph active) instead of switching to 'irg' as
   every other new_rd_* wrapper in this file does (cf. new_rd_Start
   above).  The End node would be created in the wrong graph whenever
   irg != current_ir_graph. */
1001 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
1004   ir_graph *rem = current_ir_graph;
1006   current_ir_graph = irg;
1007   res = new_bd_End (db, block);
1008   current_ir_graph = rem;
1013 /* Creates a Phi node with all predecessors.  Calling this constructor
1014    is only allowed if the corresponding block is mature.  */
/* rd wrappers: switch current_ir_graph to irg, delegate to bd, restore. */
1016 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
1019   ir_graph *rem = current_ir_graph;
1021   current_ir_graph = irg;
1022   res = new_bd_Phi (db, block,arity, in, mode);
1023   current_ir_graph = rem;
1029 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
1032   ir_graph *rem = current_ir_graph;
1034   current_ir_graph = irg;
1035   res = new_bd_Const_type (db, block, mode, con, tp);
1036   current_ir_graph = rem;
1042 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1045   ir_graph *rem = current_ir_graph;
1047   current_ir_graph = irg;
1048   res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1049   current_ir_graph = rem;
/* Direct delegation -- no graph switch needed, new_rd_Const does it. */
1055 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1057   return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* rd wrappers for Id/Proj/defaultProj/Conv/Cast/Tuple -- all follow the
   save/switch/delegate/restore pattern. */
1061 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1064   ir_graph *rem = current_ir_graph;
1066   current_ir_graph = irg;
1067   res = new_bd_Id(db, block, val, mode);
1068   current_ir_graph = rem;
1074 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1078   ir_graph *rem = current_ir_graph;
1080   current_ir_graph = irg;
1081   res = new_bd_Proj(db, block, arg, mode, proj);
1082   current_ir_graph = rem;
1088 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1092   ir_graph *rem = current_ir_graph;
1094   current_ir_graph = irg;
1095   res = new_bd_defaultProj(db, block, arg, max_proj);
1096   current_ir_graph = rem;
1102 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1105   ir_graph *rem = current_ir_graph;
1107   current_ir_graph = irg;
1108   res = new_bd_Conv(db, block, op, mode);
1109   current_ir_graph = rem;
1115 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
1118   ir_graph *rem = current_ir_graph;
1120   current_ir_graph = irg;
1121   res = new_bd_Cast(db, block, op, to_tp);
1122   current_ir_graph = rem;
1128 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1131   ir_graph *rem = current_ir_graph;
1133   current_ir_graph = irg;
1134   res = new_bd_Tuple(db, block, arity, in);
1135   current_ir_graph = rem;
/* rd wrappers for the arithmetic constructors (Add..Mod) -- same
   save/switch/delegate/restore pattern throughout. */
1141 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1142        ir_node *op1, ir_node *op2, ir_mode *mode)
1145   ir_graph *rem = current_ir_graph;
1147   current_ir_graph = irg;
1148   res = new_bd_Add(db, block, op1, op2, mode);
1149   current_ir_graph = rem;
1155 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1156        ir_node *op1, ir_node *op2, ir_mode *mode)
1159   ir_graph *rem = current_ir_graph;
1161   current_ir_graph = irg;
1162   res = new_bd_Sub(db, block, op1, op2, mode);
1163   current_ir_graph = rem;
1169 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1170              ir_node *op,  ir_mode *mode)
1173   ir_graph *rem = current_ir_graph;
1175   current_ir_graph = irg;
1176   res = new_bd_Minus(db, block, op, mode);
1177   current_ir_graph = rem;
1183 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1184        ir_node *op1, ir_node *op2, ir_mode *mode)
1187   ir_graph *rem = current_ir_graph;
1189   current_ir_graph = irg;
1190   res = new_bd_Mul(db, block, op1, op2, mode);
1191   current_ir_graph = rem;
1197 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1198             ir_node *memop, ir_node *op1, ir_node *op2)
1201   ir_graph *rem = current_ir_graph;
1203   current_ir_graph = irg;
1204   res = new_bd_Quot(db, block, memop, op1, op2);
1205   current_ir_graph = rem;
1211 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1212           ir_node *memop, ir_node *op1, ir_node *op2)
1215   ir_graph *rem = current_ir_graph;
1217   current_ir_graph = irg;
1218   res = new_bd_DivMod(db, block, memop, op1, op2);
1219   current_ir_graph = rem;
1225 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1226            ir_node *memop, ir_node *op1, ir_node *op2)
1229   ir_graph *rem = current_ir_graph;
1231   current_ir_graph = irg;
1232   res = new_bd_Div (db, block, memop, op1, op2);
1233   current_ir_graph =rem;
1239 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1240            ir_node *memop, ir_node *op1, ir_node *op2)
1243   ir_graph *rem = current_ir_graph;
1245   current_ir_graph = irg;
1246   res = new_bd_Mod(db, block, memop, op1, op2);
1247   current_ir_graph = rem;
/* rd wrappers for the bitwise/shift/compare constructors (And..Cmp). */
1253 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1254            ir_node *op1, ir_node *op2, ir_mode *mode)
1257   ir_graph *rem = current_ir_graph;
1259   current_ir_graph = irg;
1260   res = new_bd_And(db, block, op1, op2, mode);
1261   current_ir_graph = rem;
1267 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1268           ir_node *op1, ir_node *op2, ir_mode *mode)
1271   ir_graph *rem = current_ir_graph;
1273   current_ir_graph = irg;
1274   res = new_bd_Or(db, block, op1, op2, mode);
1275   current_ir_graph = rem;
1281 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1282            ir_node *op1, ir_node *op2, ir_mode *mode)
1285   ir_graph *rem = current_ir_graph;
1287   current_ir_graph = irg;
1288   res = new_bd_Eor(db, block, op1, op2, mode);
1289   current_ir_graph = rem;
1295 new_rd_Not    (dbg_info *db, ir_graph *irg, ir_node *block,
1296           ir_node *op, ir_mode *mode)
1299   ir_graph *rem = current_ir_graph;
1301   current_ir_graph = irg;
1302   res = new_bd_Not(db, block, op, mode);
1303   current_ir_graph = rem;
1309 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1310           ir_node *op, ir_node *k, ir_mode *mode)
1313   ir_graph *rem = current_ir_graph;
1315   current_ir_graph = irg;
1316   res = new_bd_Shl (db, block, op, k, mode);
1317   current_ir_graph = rem;
1323 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1324        ir_node *op, ir_node *k, ir_mode *mode)
1327   ir_graph *rem = current_ir_graph;
1329   current_ir_graph = irg;
1330   res = new_bd_Shr(db, block, op, k, mode);
1331   current_ir_graph = rem;
1337 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1338        ir_node *op, ir_node *k, ir_mode *mode)
1341   ir_graph *rem = current_ir_graph;
1343   current_ir_graph = irg;
1344   res = new_bd_Shrs(db, block, op, k, mode);
1345   current_ir_graph = rem;
1351 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1352        ir_node *op, ir_node *k, ir_mode *mode)
1355   ir_graph *rem = current_ir_graph;
1357   current_ir_graph = irg;
1358   res = new_bd_Rot(db, block, op, k, mode);
1359   current_ir_graph = rem;
1365 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1366        ir_node *op, ir_mode *mode)
1369   ir_graph *rem = current_ir_graph;
1371   current_ir_graph = irg;
1372   res = new_bd_Abs(db, block, op, mode);
1373   current_ir_graph = rem;
1379 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1380        ir_node *op1, ir_node *op2)
1383   ir_graph *rem = current_ir_graph;
1385   current_ir_graph = irg;
1386   res = new_bd_Cmp(db, block, op1, op2);
1387   current_ir_graph = rem;
/* rd wrappers for control flow and call constructors (Jmp..Raise). */
1393 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1396   ir_graph *rem = current_ir_graph;
1398   current_ir_graph = irg;
1399   res = new_bd_Jmp(db, block);
1400   current_ir_graph = rem;
1406 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1409   ir_graph *rem = current_ir_graph;
1411   current_ir_graph = irg;
1412   res = new_bd_IJmp(db, block, tgt);
1413   current_ir_graph = rem;
1419 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1422   ir_graph *rem = current_ir_graph;
1424   current_ir_graph = irg;
1425   res = new_bd_Cond(db, block, c);
1426   current_ir_graph = rem;
1432 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1433             ir_node *callee, int arity, ir_node **in, type *tp)
1436   ir_graph *rem = current_ir_graph;
1438   current_ir_graph = irg;
1439   res = new_bd_Call(db, block, store, callee, arity, in, tp);
1440   current_ir_graph = rem;
1446 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1447               ir_node *store, int arity, ir_node **in)
1450   ir_graph *rem = current_ir_graph;
1452   current_ir_graph = irg;
1453   res = new_bd_Return(db, block, store, arity, in);
1454   current_ir_graph = rem;
1460 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1463   ir_graph *rem = current_ir_graph;
1465   current_ir_graph = irg;
1466   res = new_bd_Raise(db, block, store, obj);
1467   current_ir_graph = rem;
/* rd wrappers for the memory constructors (Load..InstOf). */
1473 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1474        ir_node *store, ir_node *adr, ir_mode *mode)
1477   ir_graph *rem = current_ir_graph;
1479   current_ir_graph = irg;
1480   res = new_bd_Load(db, block, store, adr, mode);
1481   current_ir_graph = rem;
1487 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1488        ir_node *store, ir_node *adr, ir_node *val)
1491   ir_graph *rem = current_ir_graph;
1493   current_ir_graph = irg;
1494   res = new_bd_Store(db, block, store, adr, val);
1495   current_ir_graph = rem;
1501 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1502        ir_node *size, type *alloc_type, where_alloc where)
1505   ir_graph *rem = current_ir_graph;
1507   current_ir_graph = irg;
1508   res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1509   current_ir_graph = rem;
1515 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1516        ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
1519   ir_graph *rem = current_ir_graph;
1521   current_ir_graph = irg;
1522   res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1523   current_ir_graph = rem;
1529 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1530            int arity, ir_node **in, entity *ent)
1533   ir_graph *rem = current_ir_graph;
1535   current_ir_graph = irg;
1536   res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1537   current_ir_graph = rem;
1543 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1544               ir_node *objptr, type *ent)
1547   ir_graph *rem = current_ir_graph;
1549   current_ir_graph = irg;
1550   res = new_bd_InstOf(db, block, store, objptr, ent);
1551   current_ir_graph = rem;
/* rd wrappers for SymConst plus kind-specific helpers.  The helpers all
   place the node in the graph's start block. */
1557 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1558                      symconst_kind symkind, type *tp)
1561   ir_graph *rem = current_ir_graph;
1563   current_ir_graph = irg;
1564   res = new_bd_SymConst_type(db, block, value, symkind, tp);
1565   current_ir_graph = rem;
1571 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1572                 symconst_kind symkind)
1574   ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* NOTE(review): the (type *) casts below initialize the symconst_symbol
   union through its first member; the kind tag (addr_ent/addr_name)
   tells consumers which member is actually valid. */
1578 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp)
1580   symconst_symbol sym = {(type *)symbol};
1581   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1584 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
1585   symconst_symbol sym = {(type *)symbol};
1586   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1589 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1590   symconst_symbol sym = {symbol};
1591   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1594 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1595   symconst_symbol sym = {symbol};
1596   return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1600 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1603   ir_graph *rem = current_ir_graph;
1605   current_ir_graph = irg;
1606   res = new_bd_Sync(db, block, arity, in);
1607   current_ir_graph = rem;
/* Bad node accessor: per-graph singleton (body not visible here). */
1613 new_rd_Bad (ir_graph *irg)
1619 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1622   ir_graph *rem = current_ir_graph;
1624   current_ir_graph = irg;
1625   res = new_bd_Confirm(db, block, val, bound, cmp);
1626   current_ir_graph = rem;
1631 /* this function is often called with current_ir_graph unset */
/* Unknown: safe to call with current_ir_graph unset because it installs
   'irg' before delegating and restores afterwards. */
1633 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1636   ir_graph *rem = current_ir_graph;
1638   current_ir_graph = irg;
1639   res = new_bd_Unknown(m);
1640   current_ir_graph = rem;
1646 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1649   ir_graph *rem = current_ir_graph;
1651   current_ir_graph = irg;
1652   res = new_bd_CallBegin(db, block, call);
1653   current_ir_graph = rem;
/* EndReg/EndExcept rd variants build the node directly (no graph switch
   needed: irg is passed straight to new_ir_node). */
1659 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1663   res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1665   IRN_VRFY_IRG(res, irg);
1670 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1674   res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1675   irg->end_except = res;
1676   IRN_VRFY_IRG (res, irg);
/* Break and Filter use the usual save/switch/delegate/restore pattern. */
1681 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1684   ir_graph *rem = current_ir_graph;
1686   current_ir_graph = irg;
1687   res = new_bd_Break(db, block);
1688   current_ir_graph = rem;
1694 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1698   ir_graph *rem = current_ir_graph;
1700   current_ir_graph = irg;
1701   res = new_bd_Filter(db, block, arg, mode, proj);
1702   current_ir_graph = rem;
/* Returns the graph's NoMem node (body not visible in this extract). */
1708 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux node: selects ir_true or ir_false depending on sel. */
1713 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1714 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1717 ir_graph *rem = current_ir_graph;
1719 current_ir_graph = irg;
1720 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1721 current_ir_graph = rem;
/* Constructs a CopyB node: block copy of an object of 'data_type' from
   src to dst, threaded through memory state 'store'. */
1726 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1727 ir_node *store, ir_node *dst, ir_node *src, type *data_type)
1730 ir_graph *rem = current_ir_graph;
1732 current_ir_graph = irg;
1733 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1734 current_ir_graph = rem;
/* The new_r_* constructors: thin wrappers around the corresponding
   new_rd_* constructors with a NULL dbg_info.  They take an explicit
   graph and block instead of relying on the current block.
   NOTE(review): the closing brace of each one-line wrapper is missing
   from this line-sampled extract. */
1739 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1740 return new_rd_Block(NULL, irg, arity, in);
1742 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1743 return new_rd_Start(NULL, irg, block);
1745 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1746 return new_rd_End(NULL, irg, block);
1748 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1749 return new_rd_Jmp(NULL, irg, block);
1751 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1752 return new_rd_IJmp(NULL, irg, block, tgt);
1754 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1755 return new_rd_Cond(NULL, irg, block, c);
1757 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1758 ir_node *store, int arity, ir_node **in) {
1759 return new_rd_Return(NULL, irg, block, store, arity, in);
1761 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1762 ir_node *store, ir_node *obj) {
1763 return new_rd_Raise(NULL, irg, block, store, obj);
/* Constant and symbolic-constant wrappers. */
1765 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1766 ir_mode *mode, tarval *con) {
1767 return new_rd_Const(NULL, irg, block, mode, con);
1770 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1771 ir_mode *mode, long value) {
1772 return new_rd_Const_long(NULL, irg, block, mode, value);
1775 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1776 ir_mode *mode, tarval *con, type *tp) {
1777 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1780 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1781 symconst_symbol value, symconst_kind symkind) {
1782 return new_rd_SymConst(NULL, irg, block, value, symkind);
1784 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1785 ir_node *objptr, int n_index, ir_node **index,
1787 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1789 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1791 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
1793 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1794 ir_node *callee, int arity, ir_node **in,
1796 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic and logic wrappers. */
1798 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1799 ir_node *op1, ir_node *op2, ir_mode *mode) {
1800 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1802 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1803 ir_node *op1, ir_node *op2, ir_mode *mode) {
1804 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1806 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1807 ir_node *op, ir_mode *mode) {
1808 return new_rd_Minus(NULL, irg, block, op, mode);
1810 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1811 ir_node *op1, ir_node *op2, ir_mode *mode) {
1812 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1814 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1815 ir_node *memop, ir_node *op1, ir_node *op2) {
1816 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1818 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1819 ir_node *memop, ir_node *op1, ir_node *op2) {
1820 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1822 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1823 ir_node *memop, ir_node *op1, ir_node *op2) {
1824 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1826 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1827 ir_node *memop, ir_node *op1, ir_node *op2) {
1828 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1830 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1831 ir_node *op, ir_mode *mode) {
1832 return new_rd_Abs(NULL, irg, block, op, mode);
1834 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1835 ir_node *op1, ir_node *op2, ir_mode *mode) {
1836 return new_rd_And(NULL, irg, block, op1, op2, mode);
1838 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1839 ir_node *op1, ir_node *op2, ir_mode *mode) {
1840 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1842 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1843 ir_node *op1, ir_node *op2, ir_mode *mode) {
1844 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1846 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1847 ir_node *op, ir_mode *mode) {
1848 return new_rd_Not(NULL, irg, block, op, mode);
1850 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1851 ir_node *op1, ir_node *op2) {
1852 return new_rd_Cmp(NULL, irg, block, op1, op2);
1854 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1855 ir_node *op, ir_node *k, ir_mode *mode) {
1856 return new_rd_Shl(NULL, irg, block, op, k, mode);
1858 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1859 ir_node *op, ir_node *k, ir_mode *mode) {
1860 return new_rd_Shr(NULL, irg, block, op, k, mode);
1862 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1863 ir_node *op, ir_node *k, ir_mode *mode) {
1864 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1866 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1867 ir_node *op, ir_node *k, ir_mode *mode) {
1868 return new_rd_Rot(NULL, irg, block, op, k, mode);
1870 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1871 ir_node *op, ir_mode *mode) {
1872 return new_rd_Conv(NULL, irg, block, op, mode);
1874 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1875 return new_rd_Cast(NULL, irg, block, op, to_tp);
/* Memory, Phi and projection wrappers. */
1877 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1878 ir_node **in, ir_mode *mode) {
1879 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1881 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1882 ir_node *store, ir_node *adr, ir_mode *mode) {
1883 return new_rd_Load(NULL, irg, block, store, adr, mode);
1885 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1886 ir_node *store, ir_node *adr, ir_node *val) {
1887 return new_rd_Store(NULL, irg, block, store, adr, val);
1889 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1890 ir_node *size, type *alloc_type, where_alloc where) {
1891 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1893 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1894 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1895 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1897 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1898 return new_rd_Sync(NULL, irg, block, arity, in);
1900 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1901 ir_mode *mode, long proj) {
1902 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1904 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1906 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1908 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1909 int arity, ir_node **in) {
1910 return new_rd_Tuple(NULL, irg, block, arity, in );
1912 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1913 ir_node *val, ir_mode *mode) {
1914 return new_rd_Id(NULL, irg, block, val, mode);
1916 ir_node *new_r_Bad (ir_graph *irg) {
1917 return new_rd_Bad(irg);
1919 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1920 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1922 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1923 return new_rd_Unknown(irg, m);
/* Interprocedural-view wrappers. */
1925 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1926 return new_rd_CallBegin(NULL, irg, block, callee);
1928 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1929 return new_rd_EndReg(NULL, irg, block);
1931 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1932 return new_rd_EndExcept(NULL, irg, block);
1934 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1935 return new_rd_Break(NULL, irg, block);
1937 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1938 ir_mode *mode, long proj) {
1939 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1941 ir_node *new_r_NoMem (ir_graph *irg) {
1942 return new_rd_NoMem(irg);
1944 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1945 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1946 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1949 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1950 ir_node *store, ir_node *dst, ir_node *src, type *data_type) {
1951 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1954 /** ********************/
1955 /** public interfaces */
1956 /** construction tools */
1960 * - create a new Start node in the current block
1962 * @return s - pointer to the created Start node
/* Creates a Start node in the current block of current_ir_graph.
   NOTE(review): the return-type and 'return res;' lines are sampled out. */
1967 new_d_Start (dbg_info *db)
1971 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1972 op_Start, mode_T, 0, NULL);
1973 /* res->attr.start.irg = current_ir_graph; */
1975 res = optimize_node(res);
1976 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates the End node (mode_X; arity -1 requests a dynamic in-array)
   in the current block, then optimizes and verifies it. */
1981 new_d_End (dbg_info *db)
1984 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1985 op_End, mode_X, -1, NULL);
1986 res = optimize_node(res);
1987 IRN_VRFY_IRG(res, current_ir_graph);
1992 /* Constructs a Block with a fixed number of predecessors.
1993 Does set current_block. Can be used with automatic Phi
1994 node construction. */
1996 new_d_Block (dbg_info *db, int arity, ir_node **in)
2000 bool has_unknown = false;
2002 res = new_bd_Block(db, arity, in);
2004 /* Create and initialize the array for Phi-node construction: one slot
     per local value (n_loc), all NULL initially. */
2005 if (get_irg_phase_state(current_ir_graph) == phase_building) {
2006 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2007 current_ir_graph->n_loc);
2008 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; a block with Unknown predecessors
   is not optimized yet (the body setting has_unknown is sampled out). */
2011 for (i = arity-1; i >= 0; i--)
2012 if (get_irn_op(in[i]) == op_Unknown) {
2017 if (!has_unknown) res = optimize_node(res);
2018 current_ir_graph->current_block = res;
2020 IRN_VRFY_IRG(res, current_ir_graph);
2025 /* ***********************************************************************/
2026 /* Methods necessary for automatic Phi node creation */
2028 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2029 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2030 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2031 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2033 Call Graph: ( A ---> B == A "calls" B)
2035 get_value mature_immBlock
2043 get_r_value_internal |
2047 new_rd_Phi0 new_rd_Phi_in
2049 * *************************************************************************** */
2051 /** Creates a Phi node with 0 predecessors: a placeholder for a value in
     an immature block, later completed by mature_immBlock. */
2052 static INLINE ir_node *
2053 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2057 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2058 IRN_VRFY_IRG(res, irg);
2062 /* There are two implementations of the Phi node construction. The first
2063 is faster, but does not work for blocks with more than 2 predecessors.
2063 The second always works but is slower and causes more unnecessary Phi
2066 Select the implementations by the following preprocessor flag set in
2068 #if USE_FAST_PHI_CONSTRUCTION
2070 /* This is a stack used for allocating and deallocating nodes in
2071 new_rd_Phi_in. The original implementation used the obstack
2072 to model this stack, now it is explicit. This reduces side effects.
2074 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi-in stack (malloc'ed struct plus a dynamic
   array of node pointers).
   NOTE(review): the malloc result is not checked in the visible lines. */
2076 new_Phi_in_stack(void) {
2079 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2081 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the dynamic array of a Phi-in stack. */
2088 free_Phi_in_stack(Phi_in_stack *s) {
2089 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the current graph's Phi-in stack so its
   memory can be reused by the next allocation (grow the array only when
   pos has reached its end). */
2093 free_to_Phi_in_stack(ir_node *phi) {
2094 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2095 current_ir_graph->Phi_in_stack->pos)
2096 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2098 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2100 (current_ir_graph->Phi_in_stack->pos)++;
/* Pops a recycled Phi node from the stack, or allocates a new one if the
   stack is empty; re-initializes the recycled node's in-array. */
2103 static INLINE ir_node *
2104 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2105 int arity, ir_node **in) {
2107 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2108 int pos = current_ir_graph->Phi_in_stack->pos;
2112 /* We need to allocate a new node */
2113 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
/* NOTE(review): 'db' above has no visible declaration in this extract --
   verify against the full source. */
2114 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2116 /* reuse the old node and initialize it again. */
2119 assert (res->kind == k_ir_node);
2120 assert (res->op == op_Phi);
2124 assert (arity >= 0);
2125 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2126 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2128 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2130 (current_ir_graph->Phi_in_stack->pos)--;
2134 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2136 /* Creates a Phi node with a given, fixed array **in of predecessors.
2137 If the Phi node is unnecessary, as the same value reaches the block
2138 through all control flow paths, it is eliminated and the value
2139 returned directly. This constructor is only intended for use in
2140 the automatic Phi node generation triggered by get_value or mature.
2141 The implementation is quite tricky and depends on the fact, that
2142 the nodes are allocated on a stack:
2143 The in array contains predecessors and NULLs. The NULLs appear,
2144 if get_r_value_internal, that computed the predecessors, reached
2145 the same block on two paths. In this case the same value reaches
2146 this block on both paths, there is no definition in between. We need
2147 not allocate a Phi where these paths merge, but we have to communicate
2148 this fact to the caller. This happens by returning a pointer to the
2149 node the caller _will_ allocate. (Yes, we predict the address. We can
2150 do so because the nodes are allocated on the obstack.) The caller then
2151 finds a pointer to itself and, when this routine is called again,
/* Creates a Phi node with the given predecessors (fast construction path).
   If the Phi turns out to be unnecessary -- the same value reaches the
   block on all paths -- the node is freed again and the value itself (or
   the predicted address of a node yet to be allocated, see the comment
   above) is returned instead. */
2154 static INLINE ir_node *
2155 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2158 ir_node *res, *known;
2160 /* Allocate a new node on the obstack. This can return a node to
2161 which some of the pointers in the in-array already point.
2162 Attention: the constructor copies the in array, i.e., the later
2163 changes to the array in this routine do not affect the
2164 constructed node! If the in array contains NULLs, there will be
2165 missing predecessors in the returned node. Is this a possible
2166 internal state of the Phi node generation? */
2167 #if USE_EXPLICIT_PHI_IN_STACK
2168 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2170 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2171 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2174 /* The in-array can contain NULLs. These were returned by
2175 get_r_value_internal if it reached the same block/definition on a
2176 second path. The NULLs are replaced by the node itself to
2177 simplify the test in the next loop. */
2178 for (i = 0; i < ins; ++i) {
2183 /* This loop checks whether the Phi has more than one predecessor.
2184 If so, it is a real Phi node and we break the loop. Else the Phi
2185 node merges the same definition on several paths and therefore is
     not needed. */
2187 for (i = 0; i < ins; ++i) {
2188 if (in[i] == res || in[i] == known)
2197 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2199 #if USE_EXPLICIT_PHI_IN_STACK
2200 free_to_Phi_in_stack(res);
2202 edges_node_deleted(res, current_ir_graph);
2203 obstack_free(current_ir_graph->obst, res);
2207 res = optimize_node (res);
2208 IRN_VRFY_IRG(res, irg);
2211 /* return the pointer to the Phi node. This node might be deallocated! */
2216 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2219 allocates and returns this node. The routine called to allocate the
2220 node might optimize it away and return a real value, or even a pointer
2221 to a deallocated Phi node on top of the obstack!
2222 This function is called with an in-array of proper size. **/
/* Collects, for local value 'pos', the reaching definition from every
   predecessor block into nin[] and builds the Phi via new_rd_Phi_in
   (fast-construction variant). */
2224 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2226 ir_node *prevBlock, *res;
2229 /* This loop goes to all predecessor blocks of the block the Phi node is in
2230 and there finds the operands of the Phi node by calling
2231 get_r_value_internal. */
2232 for (i = 1; i <= ins; ++i) {
2233 assert (block->in[i]);
2234 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2236 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2239 /* After collecting all predecessors into the array nin a new Phi node
2240 with these predecessors is created. This constructor contains an
2241 optimization: If all predecessors of the Phi node are identical it
2242 returns the only operand instead of a new Phi node. If the value
2243 passes two different control flow edges without being defined, and
2244 this is the second path treated, a pointer to the node that will be
2245 allocated for the first path (recursion) is returned. We already
2246 know the address of this node, as it is the next node to be allocated
2247 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2248 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2250 /* Now we know the value for "pos" and can enter it in the array with
2251 all known local variables. Attention: this might be a pointer to
2252 a node, that later will be allocated!!! See new_rd_Phi_in.
2253 If this is called in mature, after some set_value in the same block,
2254 the proper value must not be overwritten:
2256 get_value (makes Phi0, puts it into graph_arr)
2257 set_value (overwrites Phi0 in graph_arr)
2258 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
     the proper value) */
2261 if (!block->attr.block.graph_arr[pos]) {
2262 block->attr.block.graph_arr[pos] = res;
2264 /* printf(" value already computed by %s\n",
2265 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2271 /* This function returns the last definition of a variable. In case
2272 this variable was last defined in a previous block, Phi nodes are
2273 inserted. If the part of the firm graph containing the definition
2274 is not yet constructed, a dummy Phi node is returned. */
2276 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2279 /* There are 4 cases to treat.
2281 1. The block is not mature and we visit it the first time. We can not
2282 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2283 predecessors is returned. This node is added to the linked list (field
2284 "link") of the containing block to be completed when this block is
2285 matured. (Completion will add a new Phi and turn the Phi0 into an Id
     node.)
2288 2. The value is already known in this block, graph_arr[pos] is set and we
2289 visit the block the first time. We can return the value without
2290 creating any new nodes.
2292 3. The block is mature and we visit it the first time. A Phi node needs
2293 to be created (phi_merge). If the Phi is not needed, as all its
2294 operands are the same value reaching the block through different
2295 paths, it's optimized away and the value itself is returned.
2297 4. The block is mature, and we visit it the second time. Now two
2298 subcases are possible:
2299 * The value was computed completely the last time we were here. This
2300 is the case if there is no loop. We can return the proper value.
2301 * The recursion that visited this node and set the flag did not
2302 return yet. We are computing a value in a loop and need to
2303 break the recursion without knowing the result yet.
2304 @@@ strange case. Straight forward we would create a Phi before
2305 starting the computation of its predecessors. In this case we will
2306 find a Phi here in any case. The problem is that this implementation
2307 only creates a Phi after computing the predecessors, so that it is
2308 hard to compute self references of this Phi. @@@
2309 There is no simple check for the second subcase. Therefore we check
2310 for a second visit and treat all such cases as the second subcase.
2311 Anyways, the basic situation is the same: we reached a block
2312 on two paths without finding a definition of the value: No Phi
2313 nodes are needed on both paths.
2314 We return this information "Two paths, no Phi needed" by a very tricky
2315 implementation that relies on the fact that an obstack is a stack and
2316 will return a node with the same address on different allocations.
2317 Look also at phi_merge and new_rd_phi_in to understand this.
2318 @@@ Unfortunately this does not work, see testprogram
2319 three_cfpred_example.
     */
2323 /* case 4 -- already visited. */
2324 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2326 /* visited the first time */
2327 set_irn_visited(block, get_irg_visited(current_ir_graph));
2329 /* Get the local valid value */
2330 res = block->attr.block.graph_arr[pos];
2332 /* case 2 -- If the value is actually computed, return it. */
2333 if (res) return res;
2335 if (block->attr.block.matured) { /* case 3 */
2337 /* The Phi has the same amount of ins as the corresponding block. */
2338 int ins = get_irn_arity(block);
2340 NEW_ARR_A (ir_node *, nin, ins);
2342 /* Phi merge collects the predecessors and then creates a node. */
2343 res = phi_merge (block, pos, mode, nin, ins);
2345 } else { /* case 1 */
2346 /* The block is not mature, we don't know how many in's are needed. A Phi
2347 with zero predecessors is created. Such a Phi node is called Phi0
2348 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2349 to the list of Phi0 nodes in this block to be matured by mature_immBlock
     later.
2351 The Phi0 has to remember the pos of its internal value. If the real
2352 Phi is computed, pos is used to update the array with the local
     values. */
2355 res = new_rd_Phi0 (current_ir_graph, block, mode);
2356 res->attr.phi0_pos = pos;
2357 res->link = block->link;
/* NOTE(review): the statement linking res back into block->link is not
   visible here -- presumably on a sampled-out line. */
2361 /* If we get here, the frontend missed a use-before-definition error */
2364 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2365 assert (mode->code >= irm_F && mode->code <= irm_P);
2366 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2367 tarval_mode_null[mode->code]);
2370 /* The local valid value is available now. */
2371 block->attr.block.graph_arr[pos] = res;
2379 it starts the recursion. This causes an Id at the entry of
2380 every block that has no definition of the value! **/
2382 #if USE_EXPLICIT_PHI_IN_STACK
/* In the general (slow) construction path the explicit Phi-in stack is
   unused; provide inert stubs so callers need not care. */
2384 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2385 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Creates a Phi node with the given predecessors (general path).  'phi0'
   is the placeholder Phi0 this Phi will replace, or NULL; self-references
   via phi0 are redirected to the new node.  If at most one real (non-Bad,
   non-self) predecessor remains, the Phi is freed again and that
   predecessor -- possibly further optimized -- is returned instead. */
2388 static INLINE ir_node *
2389 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2390 ir_node **in, int ins, ir_node *phi0)
2393 ir_node *res, *known;
2395 /* Allocate a new node on the obstack. The allocation copies the in
     array. */
2397 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2398 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2400 /* This loop checks whether the Phi has more than one predecessor.
2401 If so, it is a real Phi node and we break the loop. Else the
2402 Phi node merges the same definition on several paths and therefore
2403 is not needed. Don't consider Bad nodes! */
2405 for (i=0; i < ins; ++i)
2409 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2411 /* Optimize self referencing Phis: We can't detect them yet properly, as
2412 they still refer to the Phi0 they will replace. So replace right now. */
2413 if (phi0 && in[i] == phi0) in[i] = res;
2415 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2423 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2426 edges_node_deleted(res, current_ir_graph);
2427 obstack_free (current_ir_graph->obst, res);
2428 if (is_Phi(known)) {
2429 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2430 order, an enclosing Phi node may become superfluous. */
2431 res = optimize_in_place_2(known);
2433 exchange(known, res);
2439 /* An undefined value, e.g., in unreachable code. */
2443 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2444 IRN_VRFY_IRG(res, irg);
2445 /* Memory Phis in endless loops must be kept alive.
2446 As we can't distinguish these easily we keep all of them alive. */
2447 if ((res->op == op_Phi) && (mode == mode_M))
2448 add_End_keepalive(irg->end, res);
2455 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2457 #if PRECISE_EXC_CONTEXT
2459 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2461 /* Construct a new frag_array for node n.
2462 Copy the content from the current graph_arr of the corresponding block:
2463 this is the current state.
2464 Set ProjM(n) as current memory state.
2465 Further the last entry in frag_arr of current block points to n. This
2466 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
     */
2468 static INLINE ir_node ** new_frag_arr (ir_node *n)
2473 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2474 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2475 sizeof(ir_node *)*current_ir_graph->n_loc);
2477 /* turn off optimization before allocating Proj nodes, as res isn't
     ... (continuation of this comment is sampled out of the extract) */
2479 opt = get_opt_optimize(); set_optimize(0);
/* NOTE(review): the restoring set_optimize(opt) call is not visible here --
   presumably on a sampled-out line further down. */
2480 /* Here we rely on the fact that all frag ops have Memory as first result! */
2481 if (get_irn_op(n) == op_Call)
2482 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
/* For all other fragile ops the memory Proj number is asserted to be the
   same, so one constant (pn_Alloc_M) serves them all. */
2484 assert((pn_Quot_M == pn_DivMod_M) &&
2485 (pn_Quot_M == pn_Div_M) &&
2486 (pn_Quot_M == pn_Mod_M) &&
2487 (pn_Quot_M == pn_Load_M) &&
2488 (pn_Quot_M == pn_Store_M) &&
2489 (pn_Quot_M == pn_Alloc_M) );
2490 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Link n into the block's chain of fragile ops via the last graph_arr slot. */
2494 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/*
2499 * returns the frag_arr from a node
 */
2501 static INLINE ir_node **
2502 get_frag_arr (ir_node *n) {
2503 switch (get_irn_opcode(n)) {
/* NOTE(review): the case labels (iro_Call, iro_Alloc, iro_Load, iro_Store,
   default, ...) are sampled out of this extract; only the return
   statements remain visible. */
2505 return n->attr.call.exc.frag_arr;
2507 return n->attr.a.exc.frag_arr;
2509 return n->attr.load.exc.frag_arr;
2511 return n->attr.store.exc.frag_arr;
2513 return n->attr.except.frag_arr;
/* Records 'val' as the value of local 'pos' in a frag array without
   overwriting an existing entry, and propagates it along the chain of
   frag arrays (the last slot links to the next fragile op). */
2518 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2520 if (!frag_arr[pos]) frag_arr[pos] = val;
2521 if (frag_arr[current_ir_graph->n_loc - 1]) {
2522 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2523 assert(arr != frag_arr && "Endless recursion detected");
2524 set_frag_value(arr, pos, val);
/* NOTE(review): the lines below look like an alternative, iterative
   formulation of the same propagation, bounded to 1000 steps; the
   surrounding preprocessor/brace structure is sampled out. */
2529 for (i = 0; i < 1000; ++i) {
2530 if (!frag_arr[pos]) {
2531 frag_arr[pos] = val;
2533 if (frag_arr[current_ir_graph->n_loc - 1]) {
2534 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2540 assert(0 && "potential endless recursion");
/* Returns the definition of local 'pos' valid after the fragile op cfOp.
   Consults cfOp's frag array first; if the block already defines the value
   a Phi has to be merged (or a Phi0 created in an immature block);
   otherwise it falls back to get_r_value_internal.  The result is written
   back through set_frag_value. */
2545 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2549 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2551 frag_arr = get_frag_arr(cfOp);
2552 res = frag_arr[pos];
2554 if (block->attr.block.graph_arr[pos]) {
2555 /* There was a set_value after the cfOp and no get_value before that
2556 set_value. We must build a Phi node now. */
2557 if (block->attr.block.matured) {
2558 int ins = get_irn_arity(block);
2560 NEW_ARR_A (ir_node *, nin, ins);
2561 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 placeholder and queue it on block->link. */
2563 res = new_rd_Phi0 (current_ir_graph, block, mode);
2564 res->attr.phi0_pos = pos;
2565 res->link = block->link;
2569 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2570 but this should be better: (remove comment if this works) */
2571 /* It's a Phi, we can write this into all graph_arrs with NULL */
2572 set_frag_value(block->attr.block.graph_arr, pos, res);
2574 res = get_r_value_internal(block, pos, mode);
2575 set_frag_value(block->attr.block.graph_arr, pos, res);
/*
2583 computes the predecessors for the real phi node, and then
2584 allocates and returns this node. The routine called to allocate the
2585 node might optimize it away and return a real value.
2586 This function must be called with an in-array of proper size. **/
2588 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2590 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2593 /* If this block has no value at pos create a Phi0 and remember it
2594 in graph_arr to break recursions.
2595 Else we may not set graph_arr as there a later value is remembered. */
2597 if (!block->attr.block.graph_arr[pos]) {
2598 if (block == get_irg_start_block(current_ir_graph)) {
2599 /* Collapsing to Bad tarvals is no good idea.
2600 So we call a user-supplied routine here that deals with this case as
2601 appropriate for the given language. Unfortunately the only help we can give
2602 here is the position.
2604 Even if all variables are defined before use, it can happen that
2605 we get to the start block, if a Cond has been replaced by a tuple
2606 (bad, jmp). In this case we call the function needlessly, eventually
2607 generating a non-existent error.
2608 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
     before. */
2611 if (default_initialize_local_variable) {
2612 ir_node *rem = get_cur_block();
2614 set_cur_block(block);
2615 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* NOTE(review): restoring the current block from 'rem' and the else branch
   are not fully visible here; the next line appears to be the fallback. */
2619 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2620 /* We don't need to care about exception ops in the start block.
2621 There are none by definition. */
2622 return block->attr.block.graph_arr[pos];
/* Not the start block: create the recursion-breaking Phi0 placeholder. */
2624 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2625 block->attr.block.graph_arr[pos] = phi0;
2626 #if PRECISE_EXC_CONTEXT
2627 if (get_opt_precise_exc_context()) {
2628 /* Set graph_arr for fragile ops. Also here we should break recursion.
2629 We could choose a cyclic path through a cfop. But the recursion would
2630 break at some point. */
2631 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2637 /* This loop goes to all predecessor blocks of the block the Phi node
2638 is in and there finds the operands of the Phi node by calling
2639 get_r_value_internal. */
2640 for (i = 1; i <= ins; ++i) {
2641 prevCfOp = skip_Proj(block->in[i]);
2643 if (is_Bad(prevCfOp)) {
2644 /* In case a Cond has been optimized we would get right to the start block
2645 with an invalid definition. */
2646 nin[i-1] = new_Bad();
2649 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2651 if (!is_Bad(prevBlock)) {
2652 #if PRECISE_EXC_CONTEXT
2653 if (get_opt_precise_exc_context() &&
2654 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2655 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2656 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2659 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2661 nin[i-1] = new_Bad();
2665 /* We want to pass the Phi0 node to the constructor: this finds additional
2666 optimization possibilities.
2667 The Phi0 node either is allocated in this function, or it comes from
2668 a former call to get_r_value_internal. In this case we may not yet
2669 exchange phi0, as this is done in mature_immBlock. */
2671 phi0_all = block->attr.block.graph_arr[pos];
2672 if (!((get_irn_op(phi0_all) == op_Phi) &&
2673 (get_irn_arity(phi0_all) == 0) &&
2674 (get_nodes_block(phi0_all) == block)))
/* NOTE(review): the statement controlled by this condition is sampled out
   of the extract. */
2680 /* After collecting all predecessors into the array nin a new Phi node
2681 with these predecessors is created. This constructor contains an
2682 optimization: If all predecessors of the Phi node are identical it
2683 returns the only operand instead of a new Phi node. */
2684 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2686 /* In case we allocated a Phi0 node at the beginning of this procedure,
2687 we need to exchange this Phi0 with the real Phi. */
2689 exchange(phi0, res);
2690 block->attr.block.graph_arr[pos] = res;
2691 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2692 only an optimization. */
2698 /* This function returns the last definition of a variable. In case
2699 this variable was last defined in a previous block, Phi nodes are
2700 inserted. If the part of the firm graph containing the definition
2701 is not yet constructed, a dummy Phi node is returned. */
2703 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2706 /* There are 4 cases to treat.
2708 1. The block is not mature and we visit it the first time. We can not
2709 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2710 predecessors is returned. This node is added to the linked list (field
2711 "link") of the containing block to be completed when this block is
2712 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2715 2. The value is already known in this block, graph_arr[pos] is set and we
2716 visit the block the first time. We can return the value without
2717 creating any new nodes.
2719 3. The block is mature and we visit it the first time. A Phi node needs
2720 to be created (phi_merge). If the Phi is not needed, as all it's
2721 operands are the same value reaching the block through different
2722 paths, it's optimized away and the value itself is returned.
2724 4. The block is mature, and we visit it the second time. Now two
2725 subcases are possible:
2726 * The value was computed completely the last time we were here. This
2727 is the case if there is no loop. We can return the proper value.
2728 * The recursion that visited this node and set the flag did not
2729 return yet. We are computing a value in a loop and need to
2730 break the recursion. This case only happens if we visited
2731 the same block with phi_merge before, which inserted a Phi0.
2732 So we return the Phi0.
/* NOTE(review): callers (get_d_value, get_store) bump the irg visited counter
   before each query; the per-block visited flag below therefore distinguishes
   the first visit of this query from a repeated (recursive) one. */
2735 /* case 4 -- already visited. */
2736 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2737 /* As phi_merge allocates a Phi0 this value is always defined. Here
2738 is the critical difference of the two algorithms. */
2739 assert(block->attr.block.graph_arr[pos]);
2740 return block->attr.block.graph_arr[pos];
2743 /* visited the first time */
2744 set_irn_visited(block, get_irg_visited(current_ir_graph));
2746 /* Get the local valid value */
2747 res = block->attr.block.graph_arr[pos];
2749 /* case 2 -- If the value is actually computed, return it. */
2750 if (res) { return res; };
2752 if (block->attr.block.matured) { /* case 3 */
2754 /* The Phi has the same amount of ins as the corresponding block. */
2755 int ins = get_irn_arity(block);
2757 NEW_ARR_A (ir_node *, nin, ins);
2759 /* Phi merge collects the predecessors and then creates a node. */
2760 res = phi_merge (block, pos, mode, nin, ins);
2762 } else { /* case 1 */
2763 /* The block is not mature, we don't know how many in's are needed. A Phi
2764 with zero predecessors is created. Such a Phi node is called Phi0
2765 node. The Phi0 is then added to the list of Phi0 nodes in this block
2766 to be matured by mature_immBlock later.
2767 The Phi0 has to remember the pos of it's internal value. If the real
2768 Phi is computed, pos is used to update the array with the local
2770 res = new_rd_Phi0 (current_ir_graph, block, mode);
2771 res->attr.phi0_pos = pos;
2772 res->link = block->link;
2776 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the assert presumably restricts the fallback to modes for
   which get_mode_null() is meaningful (irm_F .. irm_P) -- TODO confirm. */
2779 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2780 assert (mode->code >= irm_F && mode->code <= irm_P);
2781 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2782 get_mode_null(mode));
2785 /* The local valid value is available now. */
2786 block->attr.block.graph_arr[pos] = res;
2791 #endif /* USE_FAST_PHI_CONSTRUCTION */
2793 /* ************************************************************************** */
2796 * Finalize a Block node, when all control flows are known.
2797 * Acceptable parameters are only Block nodes.
2800 mature_immBlock (ir_node *block)
2806 assert (get_irn_opcode(block) == iro_Block);
2807 /* @@@ should be commented in
2808 assert (!get_Block_matured(block) && "Block already matured"); */
2810 if (!get_Block_matured(block)) {
/* NOTE(review): the -1 suggests the first entry of block->in is reserved and
   not a control-flow predecessor -- confirm against new_ir_node's in layout. */
2811 ins = ARR_LEN (block->in)-1;
2812 /* Fix block parameters */
2813 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2815 /* An array for building the Phi nodes. */
2816 NEW_ARR_A (ir_node *, nin, ins);
2818 /* Traverse a chain of Phi nodes attached to this block and mature
2820 for (n = block->link; n; n=next) {
/* Each phi_merge query needs a fresh visited generation. */
2821 inc_irg_visited(current_ir_graph);
2823 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2826 block->attr.block.matured = 1;
2828 /* Now, as the block is a finished firm node, we can optimize it.
2829 Since other nodes have been allocated since the block was created
2830 we can not free the node on the obstack. Therefore we have to call
2832 Unfortunately the optimization does not change a lot, as all allocated
2833 nodes refer to the unoptimized node.
2834 We can call _2, as global cse has no effect on blocks. */
2835 block = optimize_in_place_2(block);
2836 IRN_VRFY_IRG(block, current_ir_graph);
/* -- Debug-info ("new_d_") constructors -------------------------------------
   Each new_d_X builds an X node in current_ir_graph->current_block by
   delegating to the corresponding new_bd_X constructor.  Constants
   (new_d_Const*) are placed in the graph's start block instead, as the
   start_block argument below shows. */
2841 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2843 return new_bd_Phi(db, current_ir_graph->current_block,
2848 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2850 return new_bd_Const(db, current_ir_graph->start_block,
2855 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2857 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2861 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, type *tp)
2863 return new_bd_Const_type(db, current_ir_graph->start_block,
2869 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2871 return new_bd_Id(db, current_ir_graph->current_block,
2876 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2878 return new_bd_Proj(db, current_ir_graph->current_block,
/* defaultProj: marks the Cond as 'fragmentary' and records the default
   projection number before creating the mode_X Proj for it. */
2883 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2886 assert(arg->op == op_Cond);
2887 arg->attr.c.kind = fragmentary;
2888 arg->attr.c.default_proj = max_proj;
2889 res = new_Proj (arg, mode_X, max_proj);
2894 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2896 return new_bd_Conv(db, current_ir_graph->current_block,
2901 new_d_Cast (dbg_info *db, ir_node *op, type *to_tp)
2903 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2907 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2909 return new_bd_Tuple(db, current_ir_graph->current_block,
2914 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2916 return new_bd_Add(db, current_ir_graph->current_block,
2921 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2923 return new_bd_Sub(db, current_ir_graph->current_block,
2929 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2931 return new_bd_Minus(db, current_ir_graph->current_block,
2936 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2938 return new_bd_Mul(db, current_ir_graph->current_block,
2943 * allocate the frag array
2945 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2946 if (get_opt_precise_exc_context()) {
2947 if ((current_ir_graph->phase_state == phase_building) &&
2948 (get_irn_op(res) == op) && /* Could be optimized away. */
2949 !*frag_store) /* Could be a cse where the arr is already set. */ {
2950 *frag_store = new_frag_arr(res);
/* -- Division family constructors -------------------------------------------
   Quot/DivMod/Div/Mod are fragile ops (they may raise, e.g. on division by
   zero): each result is pinned to its block, and under PRECISE_EXC_CONTEXT a
   frag array is allocated so exception successors see correct values. */
2957 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2960 res = new_bd_Quot (db, current_ir_graph->current_block,
2962 res->attr.except.pin_state = op_pin_state_pinned;
2963 #if PRECISE_EXC_CONTEXT
2964 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2971 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2974 res = new_bd_DivMod (db, current_ir_graph->current_block,
2976 res->attr.except.pin_state = op_pin_state_pinned;
2977 #if PRECISE_EXC_CONTEXT
2978 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2985 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2988 res = new_bd_Div (db, current_ir_graph->current_block,
2990 res->attr.except.pin_state = op_pin_state_pinned;
2991 #if PRECISE_EXC_CONTEXT
2992 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2999 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3002 res = new_bd_Mod (db, current_ir_graph->current_block,
3004 res->attr.except.pin_state = op_pin_state_pinned;
3005 #if PRECISE_EXC_CONTEXT
3006 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* -- Bitwise, shift, compare and control-flow constructors ------------------
   Thin wrappers: build the node in the current block via new_bd_X. */
3013 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3015 return new_bd_And (db, current_ir_graph->current_block,
3020 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3022 return new_bd_Or (db, current_ir_graph->current_block,
3027 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3029 return new_bd_Eor (db, current_ir_graph->current_block,
3034 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
3036 return new_bd_Not (db, current_ir_graph->current_block,
3041 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3043 return new_bd_Shl (db, current_ir_graph->current_block,
3048 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3050 return new_bd_Shr (db, current_ir_graph->current_block,
3055 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3057 return new_bd_Shrs (db, current_ir_graph->current_block,
3062 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3064 return new_bd_Rot (db, current_ir_graph->current_block,
3069 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3071 return new_bd_Abs (db, current_ir_graph->current_block,
3076 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3078 return new_bd_Cmp (db, current_ir_graph->current_block,
3083 new_d_Jmp (dbg_info *db)
3085 return new_bd_Jmp (db, current_ir_graph->current_block);
3089 new_d_IJmp (dbg_info *db, ir_node *tgt)
3091 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3095 new_d_Cond (dbg_info *db, ir_node *c)
3097 return new_bd_Cond (db, current_ir_graph->current_block, c);
/* -- Call, memory and miscellaneous constructors ----------------------------
   Wrappers building in the current block.  Fragile ops (Call, Load, Store,
   Alloc, CopyB) additionally get a frag array under PRECISE_EXC_CONTEXT.
   SymConsts are placed in the start block. */
3101 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3105 res = new_bd_Call (db, current_ir_graph->current_block,
3106 store, callee, arity, in, tp);
3107 #if PRECISE_EXC_CONTEXT
3108 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3115 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3117 return new_bd_Return (db, current_ir_graph->current_block,
3122 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3124 return new_bd_Raise (db, current_ir_graph->current_block,
3129 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3132 res = new_bd_Load (db, current_ir_graph->current_block,
3134 #if PRECISE_EXC_CONTEXT
3135 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3142 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3145 res = new_bd_Store (db, current_ir_graph->current_block,
3147 #if PRECISE_EXC_CONTEXT
3148 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3155 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, type *alloc_type,
3159 res = new_bd_Alloc (db, current_ir_graph->current_block,
3160 store, size, alloc_type, where);
3161 #if PRECISE_EXC_CONTEXT
3162 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3169 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3170 ir_node *size, type *free_type, where_alloc where)
3172 return new_bd_Free (db, current_ir_graph->current_block,
3173 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (n_index = 0). */
3177 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3178 /* GL: objptr was called frame before. Frame was a bad choice for the name
3179 as the operand could as well be a pointer to a dynamic object. */
3181 return new_bd_Sel (db, current_ir_graph->current_block,
3182 store, objptr, 0, NULL, ent);
3186 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3188 return new_bd_Sel (db, current_ir_graph->current_block,
3189 store, objptr, n_index, index, sel);
3193 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
3195 return (new_bd_InstOf (db, current_ir_graph->current_block,
3196 store, objptr, ent));
3200 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, type *tp)
3202 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3207 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3209 return new_bd_SymConst (db, current_ir_graph->start_block,
3214 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3216 return new_bd_Sync (db, current_ir_graph->current_block,
3223 return _new_d_Bad();
3227 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3229 return new_bd_Confirm (db, current_ir_graph->current_block,
3234 new_d_Unknown (ir_mode *m)
3236 return new_bd_Unknown(m);
3240 new_d_CallBegin (dbg_info *db, ir_node *call)
3243 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3248 new_d_EndReg (dbg_info *db)
3251 res = new_bd_EndReg(db, current_ir_graph->current_block);
3256 new_d_EndExcept (dbg_info *db)
3259 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3264 new_d_Break (dbg_info *db)
3266 return new_bd_Break (db, current_ir_graph->current_block);
3270 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3272 return new_bd_Filter (db, current_ir_graph->current_block,
3279 return _new_d_NoMem();
3283 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3284 ir_node *ir_true, ir_mode *mode) {
3285 return new_bd_Mux (db, current_ir_graph->current_block,
3286 sel, ir_false, ir_true, mode);
3289 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
3290 ir_node *dst, ir_node *src, type *data_type) {
3292 res = new_bd_CopyB(db, current_ir_graph->current_block,
3293 store, dst, src, data_type);
3294 #if PRECISE_EXC_CONTEXT
3295 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
3300 /* ********************************************************************* */
3301 /* Comfortable interface with automatic Phi node construction. */
3302 /* (Uses also constructors of the ?? interface, except new_Block.) */
3303 /* ********************************************************************* */
3305 /* Block construction */
3306 /* immature Block without predecessors */
/* Creates an immature Block (predecessors added later via add_immBlock_pred),
   makes it the current block, and sets up the per-block value array used by
   the automatic Phi construction. */
3307 ir_node *new_d_immBlock (dbg_info *db) {
3310 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3311 /* creates a new dynamic in-array as length of in is -1 */
3312 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
/* The new block becomes the construction target for subsequent new_d_* calls. */
3313 current_ir_graph->current_block = res;
3314 res->attr.block.matured = 0;
3315 res->attr.block.dead = 0;
3316 /* res->attr.block.exc = exc_normal; */
3317 /* res->attr.block.handler_entry = 0; */
3318 res->attr.block.irg = current_ir_graph;
3319 res->attr.block.backedge = NULL;
3320 res->attr.block.in_cg = NULL;
3321 res->attr.block.cg_backedge = NULL;
3322 set_Block_block_visited(res, 0);
3324 /* Create and initialize array for Phi-node construction. */
3325 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3326 current_ir_graph->n_loc);
/* All n_loc value slots start out undefined (NULL). */
3327 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3329 /* Immature block may not be optimized! */
3330 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience: immature block without debug info. */
3336 new_immBlock (void) {
3337 return new_d_immBlock(NULL);
3340 /* add an edge to a jmp/control flow node */
3342 add_immBlock_pred (ir_node *block, ir_node *jmp)
/* Predecessors may only be added while the block is still immature. */
3344 if (block->attr.block.matured) {
3345 assert(0 && "Error: Block already matured!\n");
3348 assert(jmp != NULL);
3349 ARR_APP1(ir_node *, block->in, jmp);
3353 /* changing the current block */
3355 set_cur_block (ir_node *target) {
3356 current_ir_graph->current_block = target;
3359 /* ************************ */
3360 /* parameter administration */
/* Slot 0 of a block's graph_arr holds the memory/store value (accessed below
   with mode_M); user-visible variables are shifted by +1, hence the pos + 1
   indexing throughout. */
3362 /* get a value from the parameter array from the current block by its index */
3364 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3366 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Fresh visited generation for this lookup (see get_r_value_internal). */
3367 inc_irg_visited(current_ir_graph);
3369 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3371 /* get a value from the parameter array from the current block by its index */
3373 get_value (int pos, ir_mode *mode)
3375 return get_d_value(NULL, pos, mode);
3378 /* set a value at position pos in the parameter array from the current block */
3380 set_value (int pos, ir_node *value)
3382 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3383 assert(pos+1 < current_ir_graph->n_loc);
3384 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3387 /* get the current store */
3391 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3392 /* GL: one could call get_value instead */
3393 inc_irg_visited(current_ir_graph);
3394 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3397 /* set the current store */
3399 set_store (ir_node *store)
3401 /* GL: one could call set_value instead */
3402 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3403 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps a node alive by attaching it to the End node's keep-alive edges. */
3407 keep_alive (ir_node *ka) {
3408 add_End_keepalive(current_ir_graph->end, ka);
3411 /* --- Useful access routines --- */
3412 /* Returns the current block of the current graph. To set the current
3413 block use set_cur_block. */
3414 ir_node *get_cur_block(void) {
3415 return get_irg_current_block(current_ir_graph);
3418 /* Returns the frame type of the current graph */
3419 type *get_cur_frame_type(void) {
3420 return get_irg_frame_type(current_ir_graph);
3424 /* ********************************************************************* */
3427 /* call once for each run of the library */
/* Registers the language-dependent callback used to initialize a local
   variable that is read before it is written (see default_initialize_local_variable). */
3429 init_cons(uninitialized_local_variable_func_t *func)
3431 default_initialize_local_variable = func;
3434 /* call for each graph */
/* Construction of this graph is finished: leave phase_building. */
3436 irg_finalize_cons (ir_graph *irg) {
3437 irg->phase_state = phase_high;
3441 irp_finalize_cons (void) {
3442 int i, n_irgs = get_irp_n_irgs();
3443 for (i = 0; i < n_irgs; i++) {
3444 irg_finalize_cons(get_irp_irg(i));
3446 irp->phase_state = phase_high;\
/* --- Convenience constructors without debug info ---------------------------
   Each new_X simply forwards to the corresponding new_d_X constructor with a
   NULL dbg_info. */
3452 ir_node *new_Block(int arity, ir_node **in) {
3453 return new_d_Block(NULL, arity, in);
3455 ir_node *new_Start (void) {
3456 return new_d_Start(NULL);
3458 ir_node *new_End (void) {
3459 return new_d_End(NULL);
3461 ir_node *new_Jmp (void) {
3462 return new_d_Jmp(NULL);
3464 ir_node *new_IJmp (ir_node *tgt) {
3465 return new_d_IJmp(NULL, tgt);
3467 ir_node *new_Cond (ir_node *c) {
3468 return new_d_Cond(NULL, c);
3470 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3471 return new_d_Return(NULL, store, arity, in);
3473 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3474 return new_d_Raise(NULL, store, obj);
3476 ir_node *new_Const (ir_mode *mode, tarval *con) {
3477 return new_d_Const(NULL, mode, con);
3480 ir_node *new_Const_long(ir_mode *mode, long value)
3482 return new_d_Const_long(NULL, mode, value);
3485 ir_node *new_Const_type(tarval *con, type *tp) {
3486 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3489 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3490 return new_d_SymConst(NULL, value, kind);
3492 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3493 return new_d_simpleSel(NULL, store, objptr, ent);
3495 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3497 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3499 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
3500 return new_d_InstOf (NULL, store, objptr, ent);
3502 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3504 return new_d_Call(NULL, store, callee, arity, in, tp);
3506 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3507 return new_d_Add(NULL, op1, op2, mode);
3509 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3510 return new_d_Sub(NULL, op1, op2, mode);
3512 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3513 return new_d_Minus(NULL, op, mode);
3515 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3516 return new_d_Mul(NULL, op1, op2, mode);
3518 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3519 return new_d_Quot(NULL, memop, op1, op2);
3521 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3522 return new_d_DivMod(NULL, memop, op1, op2);
3524 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3525 return new_d_Div(NULL, memop, op1, op2);
3527 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3528 return new_d_Mod(NULL, memop, op1, op2);
3530 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3531 return new_d_Abs(NULL, op, mode);
3533 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3534 return new_d_And(NULL, op1, op2, mode);
3536 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3537 return new_d_Or(NULL, op1, op2, mode);
3539 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3540 return new_d_Eor(NULL, op1, op2, mode);
3542 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3543 return new_d_Not(NULL, op, mode);
3545 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3546 return new_d_Shl(NULL, op, k, mode);
3548 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3549 return new_d_Shr(NULL, op, k, mode);
3551 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3552 return new_d_Shrs(NULL, op, k, mode);
3554 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3555 return new_d_Rot(NULL, op, k, mode);
3557 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3558 return new_d_Cmp(NULL, op1, op2);
3560 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3561 return new_d_Conv(NULL, op, mode);
3563 ir_node *new_Cast (ir_node *op, type *to_tp) {
3564 return new_d_Cast(NULL, op, to_tp);
3566 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3567 return new_d_Phi(NULL, arity, in, mode);
3569 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3570 return new_d_Load(NULL, store, addr, mode);
3572 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3573 return new_d_Store(NULL, store, addr, val);
3575 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
3576 where_alloc where) {
3577 return new_d_Alloc(NULL, store, size, alloc_type, where);
3579 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3580 type *free_type, where_alloc where) {
3581 return new_d_Free(NULL, store, ptr, size, free_type, where);
3583 ir_node *new_Sync (int arity, ir_node **in) {
3584 return new_d_Sync(NULL, arity, in);
3586 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3587 return new_d_Proj(NULL, arg, mode, proj);
3589 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3590 return new_d_defaultProj(NULL, arg, max_proj);
3592 ir_node *new_Tuple (int arity, ir_node **in) {
3593 return new_d_Tuple(NULL, arity, in);
3595 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3596 return new_d_Id(NULL, val, mode);
3598 ir_node *new_Bad (void) {
3601 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3602 return new_d_Confirm (NULL, val, bound, cmp);
3604 ir_node *new_Unknown(ir_mode *m) {
3605 return new_d_Unknown(m);
3607 ir_node *new_CallBegin (ir_node *callee) {
3608 return new_d_CallBegin(NULL, callee);
3610 ir_node *new_EndReg (void) {
3611 return new_d_EndReg(NULL);
3613 ir_node *new_EndExcept (void) {
3614 return new_d_EndExcept(NULL);
3616 ir_node *new_Break (void) {
3617 return new_d_Break(NULL);
3619 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3620 return new_d_Filter(NULL, arg, mode, proj);
3622 ir_node *new_NoMem (void) {
3623 return new_d_NoMem();
3625 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3626 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3628 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, type *data_type) {
3629 return new_d_CopyB(NULL, store, dst, src, data_type);