3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
/* Builds a Block node with a fixed number of predecessors `in[0..arity-1]`.
   Uses current_ir_graph; does not touch current_block, so it must not be
   mixed with the automatic Phi-construction interface. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
/* Block nodes have no block of their own: the block argument is NULL. */
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Mark the block mature (all predecessors known) and reset the visit flag. */
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
/* Initialize the block attributes; backedge flags live on the graph obstack. */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view and extended-basic-block data start out unset. */
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
125 bool has_unknown = false;
127 /* Don't assert that block matured: the use of this constructor is strongly
/* Only when the block is already mature can we check that the Phi arity
   matches the block's predecessor count. */
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; optimization is skipped then,
   since the Phi may still change once the Unknowns are resolved. */
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146 As we can't distinguish these easily we keep all of them alive. */
/* optimize_node may have replaced the Phi; only keep real memory Phis. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
/* Builds a typed Const node for tarval `con`.
   NOTE: the `block` parameter is ignored — the node is always placed in
   the graph's start block (see the new_ir_node call below). */
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp); /* Call method because of complex assertion. */
161 res = optimize_node (res);
/* optimize_node may return a CSE'd node; it must still carry type tp. */
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
/* Builds the default Proj (number max_proj) for a Cond node.
   CAUTION: this constructor mutates the Cond argument — it switches the
   Cond to `fragmentary` kind and records max_proj as its default proj. */
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
304 new_bd_Minus (dbg_info *db, ir_node *block,
305 ir_node *op, ir_mode *mode)
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
311 res = optimize_node(res);
312 IRN_VRFY_IRG(res, irg);
317 new_bd_Mul (dbg_info *db, ir_node *block,
318 ir_node *op1, ir_node *op2, ir_mode *mode)
322 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
327 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
333 new_bd_Quot (dbg_info *db, ir_node *block,
334 ir_node *memop, ir_node *op1, ir_node *op2)
338 ir_graph *irg = current_ir_graph;
343 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
344 res = optimize_node(res);
345 IRN_VRFY_IRG(res, irg);
350 new_bd_DivMod (dbg_info *db, ir_node *block,
351 ir_node *memop, ir_node *op1, ir_node *op2)
355 ir_graph *irg = current_ir_graph;
360 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Div (dbg_info *db, ir_node *block,
368 ir_node *memop, ir_node *op1, ir_node *op2)
372 ir_graph *irg = current_ir_graph;
377 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
378 res = optimize_node(res);
379 IRN_VRFY_IRG(res, irg);
384 new_bd_Mod (dbg_info *db, ir_node *block,
385 ir_node *memop, ir_node *op1, ir_node *op2)
389 ir_graph *irg = current_ir_graph;
394 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
395 res = optimize_node(res);
396 IRN_VRFY_IRG(res, irg);
401 new_bd_And (dbg_info *db, ir_node *block,
402 ir_node *op1, ir_node *op2, ir_mode *mode)
406 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_Or (dbg_info *db, ir_node *block,
418 ir_node *op1, ir_node *op2, ir_mode *mode)
422 ir_graph *irg = current_ir_graph;
426 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
427 res = optimize_node(res);
428 IRN_VRFY_IRG(res, irg);
433 new_bd_Eor (dbg_info *db, ir_node *block,
434 ir_node *op1, ir_node *op2, ir_mode *mode)
438 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
443 res = optimize_node (res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_Not (dbg_info *db, ir_node *block,
450 ir_node *op, ir_mode *mode)
453 ir_graph *irg = current_ir_graph;
455 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_bd_Shl (dbg_info *db, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
467 ir_graph *irg = current_ir_graph;
471 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Shr (dbg_info *db, ir_node *block,
479 ir_node *op, ir_node *k, ir_mode *mode)
483 ir_graph *irg = current_ir_graph;
487 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_bd_Shrs (dbg_info *db, ir_node *block,
495 ir_node *op, ir_node *k, ir_mode *mode)
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
504 res = optimize_node(res);
505 IRN_VRFY_IRG(res, irg);
510 new_bd_Rot (dbg_info *db, ir_node *block,
511 ir_node *op, ir_node *k, ir_mode *mode)
515 ir_graph *irg = current_ir_graph;
519 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
520 res = optimize_node(res);
521 IRN_VRFY_IRG(res, irg);
526 new_bd_Abs (dbg_info *db, ir_node *block,
527 ir_node *op, ir_mode *mode)
530 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
533 res = optimize_node (res);
534 IRN_VRFY_IRG(res, irg);
539 new_bd_Cmp (dbg_info *db, ir_node *block,
540 ir_node *op1, ir_node *op2)
544 ir_graph *irg = current_ir_graph;
549 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
556 new_bd_Jmp (dbg_info *db, ir_node *block)
559 ir_graph *irg = current_ir_graph;
561 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
562 res = optimize_node (res);
563 IRN_VRFY_IRG (res, irg);
568 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
571 ir_graph *irg = current_ir_graph;
573 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
574 res = optimize_node (res);
575 IRN_VRFY_IRG (res, irg);
577 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
583 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
586 ir_graph *irg = current_ir_graph;
588 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
589 res->attr.c.kind = dense;
590 res->attr.c.default_proj = 0;
591 res->attr.c.pred = COND_JMP_PRED_NONE;
592 res = optimize_node (res);
593 IRN_VRFY_IRG(res, irg);
/* Builds a Call node: predecessors are [store, callee, in[0..arity-1]].
   tp is the method type of the callee (or the unknown type). */
598 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
599 ir_node *callee, int arity, ir_node **in, type *tp)
604 ir_graph *irg = current_ir_graph;
/* r_in is alloca'd: slots 0/1 hold store and callee, the arguments follow. */
607 NEW_ARR_A(ir_node *, r_in, r_arity);
610 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
612 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
614 assert((get_unknown_type() == tp) || is_Method_type(tp));
615 set_Call_type(res, tp);
/* Calls are pinned by default; no callee set has been computed yet. */
616 res->attr.call.exc.pin_state = op_pin_state_pinned;
617 res->attr.call.callee_arr = NULL;
618 res = optimize_node(res);
619 IRN_VRFY_IRG(res, irg);
624 new_bd_Return (dbg_info *db, ir_node *block,
625 ir_node *store, int arity, ir_node **in)
630 ir_graph *irg = current_ir_graph;
633 NEW_ARR_A (ir_node *, r_in, r_arity);
635 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
636 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
658 new_bd_Load (dbg_info *db, ir_node *block,
659 ir_node *store, ir_node *adr, ir_mode *mode)
663 ir_graph *irg = current_ir_graph;
667 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
668 res->attr.load.exc.pin_state = op_pin_state_pinned;
669 res->attr.load.load_mode = mode;
670 res->attr.load.volatility = volatility_non_volatile;
671 res = optimize_node(res);
672 IRN_VRFY_IRG(res, irg);
677 new_bd_Store (dbg_info *db, ir_node *block,
678 ir_node *store, ir_node *adr, ir_node *val)
682 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
688 res->attr.store.exc.pin_state = op_pin_state_pinned;
689 res->attr.store.volatility = volatility_non_volatile;
690 res = optimize_node(res);
691 IRN_VRFY_IRG(res, irg);
696 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
697 ir_node *size, type *alloc_type, where_alloc where)
701 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
706 res->attr.a.exc.pin_state = op_pin_state_pinned;
707 res->attr.a.where = where;
708 res->attr.a.type = alloc_type;
709 res = optimize_node(res);
710 IRN_VRFY_IRG(res, irg);
715 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
716 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
720 ir_graph *irg = current_ir_graph;
725 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
726 res->attr.f.where = where;
727 res->attr.f.type = free_type;
728 res = optimize_node(res);
729 IRN_VRFY_IRG(res, irg);
734 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
735 int arity, ir_node **in, entity *ent)
740 ir_graph *irg = current_ir_graph;
742 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
745 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
748 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
750 * FIXM: Sel's can select functions which should be of mode mode_P_code.
752 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
753 res->attr.s.ent = ent;
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
/* Builds an InstOf (dynamic type test) node.
   NOTE(review): the node is created with op_Sel but its attributes are
   written through attr.io (the InstOf attribute union member) — confirm
   whether op_InstOf was intended here. */
760 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
761 ir_node *objptr, type *ent)
766 ir_graph *irg = current_ir_graph;
769 NEW_ARR_A(ir_node *, r_in, r_arity);
773 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
774 res->attr.io.ent = ent;
/* Optimization deliberately disabled for this constructor. */
776 /* res = optimize(res); */
777 IRN_VRFY_IRG(res, irg);
/* Builds a typed SymConst node. Address kinds get pointer mode; the
   symbol payload (entity, ident or type) is stored in the union `value`. */
782 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
783 symconst_kind symkind, type *tp) {
786 ir_graph *irg = current_ir_graph;
788 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
789 mode = mode_P_data; /* FIXME: can be mode_P_code */
793 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
795 res->attr.i.num = symkind;
796 res->attr.i.sym = value;
799 res = optimize_node(res);
800 IRN_VRFY_IRG(res, irg);
805 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
806 symconst_kind symkind)
808 ir_graph *irg = current_ir_graph;
810 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
815 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
818 ir_graph *irg = current_ir_graph;
820 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
827 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
829 ir_node *in[2], *res;
830 ir_graph *irg = current_ir_graph;
834 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
835 res->attr.confirm_cmp = cmp;
836 res = optimize_node (res);
837 IRN_VRFY_IRG(res, irg);
841 /* this function is often called with current_ir_graph unset */
/* Builds an Unknown node of mode m, always placed in the start block.
   No dbg_info is attached and — unlike the other constructors — the
   fragment shows no IRN_VRFY call after optimization. */
843 new_bd_Unknown (ir_mode *m)
846 ir_graph *irg = current_ir_graph;
848 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
849 res = optimize_node(res);
854 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
858 ir_graph *irg = current_ir_graph;
860 in[0] = get_Call_ptr(call);
861 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
862 /* res->attr.callbegin.irg = irg; */
863 res->attr.callbegin.call = call;
864 res = optimize_node(res);
865 IRN_VRFY_IRG(res, irg);
870 new_bd_EndReg (dbg_info *db, ir_node *block)
873 ir_graph *irg = current_ir_graph;
875 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
877 IRN_VRFY_IRG(res, irg);
882 new_bd_EndExcept (dbg_info *db, ir_node *block)
885 ir_graph *irg = current_ir_graph;
887 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
888 irg->end_except = res;
889 IRN_VRFY_IRG (res, irg);
894 new_bd_Break (dbg_info *db, ir_node *block)
897 ir_graph *irg = current_ir_graph;
899 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
900 res = optimize_node(res);
901 IRN_VRFY_IRG(res, irg);
906 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
910 ir_graph *irg = current_ir_graph;
912 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
913 res->attr.filter.proj = proj;
914 res->attr.filter.in_cg = NULL;
915 res->attr.filter.backedge = NULL;
918 assert(get_Proj_pred(res));
919 assert(get_nodes_block(get_Proj_pred(res)));
921 res = optimize_node(res);
922 IRN_VRFY_IRG(res, irg);
927 new_bd_Mux (dbg_info *db, ir_node *block,
928 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
932 ir_graph *irg = current_ir_graph;
938 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
941 res = optimize_node(res);
942 IRN_VRFY_IRG(res, irg);
947 new_bd_CopyB (dbg_info *db, ir_node *block,
948 ir_node *store, ir_node *dst, ir_node *src, type *data_type)
952 ir_graph *irg = current_ir_graph;
958 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
960 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
961 res->attr.copyb.data_type = data_type;
962 res = optimize_node(res);
963 IRN_VRFY_IRG(res, irg);
967 /* --------------------------------------------- */
968 /* private interfaces, for professional use only */
969 /* --------------------------------------------- */
971 /* Constructs a Block with a fixed number of predecessors.
972 Does not set current_block. Can not be used with automatic
973 Phi node construction. */
975 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
977 ir_graph *rem = current_ir_graph;
980 current_ir_graph = irg;
981 res = new_bd_Block (db, arity, in);
982 current_ir_graph = rem;
988 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
990 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Start (db, block);
995 current_ir_graph = rem;
/* Builds an End node in graph irg, block `block`, with debug info db.
   Like every new_rd_* wrapper: temporarily switch current_ir_graph to
   irg, delegate to the new_bd_* constructor, then restore.
   FIX: the original assigned `current_ir_graph = rem;` here (a no-op),
   so new_bd_End was built in whatever graph happened to be current
   instead of irg. All sibling wrappers assign irg. */
1001 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
1004 ir_graph *rem = current_ir_graph;
1006 current_ir_graph = irg;
1007 res = new_bd_End (db, block);
1008 current_ir_graph = rem;
1013 /* Creates a Phi node with all predecessors. Calling this constructor
1014 is only allowed if the corresponding block is mature. */
1016 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
1019 ir_graph *rem = current_ir_graph;
1021 current_ir_graph = irg;
1022 res = new_bd_Phi (db, block,arity, in, mode);
1023 current_ir_graph = rem;
1029 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
1032 ir_graph *rem = current_ir_graph;
1034 current_ir_graph = irg;
1035 res = new_bd_Const_type (db, block, mode, con, tp);
1036 current_ir_graph = rem;
1042 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1045 ir_graph *rem = current_ir_graph;
1047 current_ir_graph = irg;
1048 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1049 current_ir_graph = rem;
1055 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1057 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1061 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1064 ir_graph *rem = current_ir_graph;
1066 current_ir_graph = irg;
1067 res = new_bd_Id(db, block, val, mode);
1068 current_ir_graph = rem;
1074 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1078 ir_graph *rem = current_ir_graph;
1080 current_ir_graph = irg;
1081 res = new_bd_Proj(db, block, arg, mode, proj);
1082 current_ir_graph = rem;
1088 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_defaultProj(db, block, arg, max_proj);
1096 current_ir_graph = rem;
1102 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Conv(db, block, op, mode);
1109 current_ir_graph = rem;
1115 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
1118 ir_graph *rem = current_ir_graph;
1120 current_ir_graph = irg;
1121 res = new_bd_Cast(db, block, op, to_tp);
1122 current_ir_graph = rem;
1128 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1131 ir_graph *rem = current_ir_graph;
1133 current_ir_graph = irg;
1134 res = new_bd_Tuple(db, block, arity, in);
1135 current_ir_graph = rem;
1141 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1142 ir_node *op1, ir_node *op2, ir_mode *mode)
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Add(db, block, op1, op2, mode);
1149 current_ir_graph = rem;
1155 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1156 ir_node *op1, ir_node *op2, ir_mode *mode)
1159 ir_graph *rem = current_ir_graph;
1161 current_ir_graph = irg;
1162 res = new_bd_Sub(db, block, op1, op2, mode);
1163 current_ir_graph = rem;
1169 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1170 ir_node *op, ir_mode *mode)
1173 ir_graph *rem = current_ir_graph;
1175 current_ir_graph = irg;
1176 res = new_bd_Minus(db, block, op, mode);
1177 current_ir_graph = rem;
1183 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1184 ir_node *op1, ir_node *op2, ir_mode *mode)
1187 ir_graph *rem = current_ir_graph;
1189 current_ir_graph = irg;
1190 res = new_bd_Mul(db, block, op1, op2, mode);
1191 current_ir_graph = rem;
1197 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1198 ir_node *memop, ir_node *op1, ir_node *op2)
1201 ir_graph *rem = current_ir_graph;
1203 current_ir_graph = irg;
1204 res = new_bd_Quot(db, block, memop, op1, op2);
1205 current_ir_graph = rem;
1211 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1212 ir_node *memop, ir_node *op1, ir_node *op2)
1215 ir_graph *rem = current_ir_graph;
1217 current_ir_graph = irg;
1218 res = new_bd_DivMod(db, block, memop, op1, op2);
1219 current_ir_graph = rem;
/* Builds a Div node in graph irg: standard new_rd_* wrapper that swaps
   current_ir_graph to irg, delegates to new_bd_Div, and restores.
   Only change: `current_ir_graph =rem;` reformatted to the file-wide
   `= rem;` spacing convention (behavior identical). */
1225 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1226 ir_node *memop, ir_node *op1, ir_node *op2)
1229 ir_graph *rem = current_ir_graph;
1231 current_ir_graph = irg;
1232 res = new_bd_Div (db, block, memop, op1, op2);
1233 current_ir_graph = rem;
1239 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1240 ir_node *memop, ir_node *op1, ir_node *op2)
1243 ir_graph *rem = current_ir_graph;
1245 current_ir_graph = irg;
1246 res = new_bd_Mod(db, block, memop, op1, op2);
1247 current_ir_graph = rem;
1253 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1254 ir_node *op1, ir_node *op2, ir_mode *mode)
1257 ir_graph *rem = current_ir_graph;
1259 current_ir_graph = irg;
1260 res = new_bd_And(db, block, op1, op2, mode);
1261 current_ir_graph = rem;
1267 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1268 ir_node *op1, ir_node *op2, ir_mode *mode)
1271 ir_graph *rem = current_ir_graph;
1273 current_ir_graph = irg;
1274 res = new_bd_Or(db, block, op1, op2, mode);
1275 current_ir_graph = rem;
1281 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1282 ir_node *op1, ir_node *op2, ir_mode *mode)
1285 ir_graph *rem = current_ir_graph;
1287 current_ir_graph = irg;
1288 res = new_bd_Eor(db, block, op1, op2, mode);
1289 current_ir_graph = rem;
1295 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1296 ir_node *op, ir_mode *mode)
1299 ir_graph *rem = current_ir_graph;
1301 current_ir_graph = irg;
1302 res = new_bd_Not(db, block, op, mode);
1303 current_ir_graph = rem;
1309 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1310 ir_node *op, ir_node *k, ir_mode *mode)
1313 ir_graph *rem = current_ir_graph;
1315 current_ir_graph = irg;
1316 res = new_bd_Shl (db, block, op, k, mode);
1317 current_ir_graph = rem;
1323 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1324 ir_node *op, ir_node *k, ir_mode *mode)
1327 ir_graph *rem = current_ir_graph;
1329 current_ir_graph = irg;
1330 res = new_bd_Shr(db, block, op, k, mode);
1331 current_ir_graph = rem;
1337 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1338 ir_node *op, ir_node *k, ir_mode *mode)
1341 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Shrs(db, block, op, k, mode);
1345 current_ir_graph = rem;
1351 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1352 ir_node *op, ir_node *k, ir_mode *mode)
1355 ir_graph *rem = current_ir_graph;
1357 current_ir_graph = irg;
1358 res = new_bd_Rot(db, block, op, k, mode);
1359 current_ir_graph = rem;
1365 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1366 ir_node *op, ir_mode *mode)
1369 ir_graph *rem = current_ir_graph;
1371 current_ir_graph = irg;
1372 res = new_bd_Abs(db, block, op, mode);
1373 current_ir_graph = rem;
1379 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1380 ir_node *op1, ir_node *op2)
1383 ir_graph *rem = current_ir_graph;
1385 current_ir_graph = irg;
1386 res = new_bd_Cmp(db, block, op1, op2);
1387 current_ir_graph = rem;
1393 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1396 ir_graph *rem = current_ir_graph;
1398 current_ir_graph = irg;
1399 res = new_bd_Jmp(db, block);
1400 current_ir_graph = rem;
1406 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1409 ir_graph *rem = current_ir_graph;
1411 current_ir_graph = irg;
1412 res = new_bd_IJmp(db, block, tgt);
1413 current_ir_graph = rem;
1419 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_Cond(db, block, c);
1426 current_ir_graph = rem;
1432 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1433 ir_node *callee, int arity, ir_node **in, type *tp)
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1440 current_ir_graph = rem;
1446 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1447 ir_node *store, int arity, ir_node **in)
1450 ir_graph *rem = current_ir_graph;
1452 current_ir_graph = irg;
1453 res = new_bd_Return(db, block, store, arity, in);
1454 current_ir_graph = rem;
1460 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1463 ir_graph *rem = current_ir_graph;
1465 current_ir_graph = irg;
1466 res = new_bd_Raise(db, block, store, obj);
1467 current_ir_graph = rem;
1473 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1474 ir_node *store, ir_node *adr, ir_mode *mode)
1477 ir_graph *rem = current_ir_graph;
1479 current_ir_graph = irg;
1480 res = new_bd_Load(db, block, store, adr, mode);
1481 current_ir_graph = rem;
1487 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1488 ir_node *store, ir_node *adr, ir_node *val)
1491 ir_graph *rem = current_ir_graph;
1493 current_ir_graph = irg;
1494 res = new_bd_Store(db, block, store, adr, val);
1495 current_ir_graph = rem;
1501 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1502 ir_node *size, type *alloc_type, where_alloc where)
1505 ir_graph *rem = current_ir_graph;
1507 current_ir_graph = irg;
1508 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1509 current_ir_graph = rem;
1515 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1516 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
1519 ir_graph *rem = current_ir_graph;
1521 current_ir_graph = irg;
1522 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1523 current_ir_graph = rem;
1529 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1530 ir_node *store, ir_node *objptr, entity *ent)
1533 ir_graph *rem = current_ir_graph;
1535 current_ir_graph = irg;
1536 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1537 current_ir_graph = rem;
1543 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1544 int arity, ir_node **in, entity *ent)
1547 ir_graph *rem = current_ir_graph;
1549 current_ir_graph = irg;
1550 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1551 current_ir_graph = rem;
1557 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1558 ir_node *objptr, type *ent)
1561 ir_graph *rem = current_ir_graph;
1563 current_ir_graph = irg;
1564 res = new_bd_InstOf(db, block, store, objptr, ent);
1565 current_ir_graph = rem;
1571 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1572 symconst_kind symkind, type *tp)
1575 ir_graph *rem = current_ir_graph;
1577 current_ir_graph = irg;
1578 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1579 current_ir_graph = rem;
1585 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1586 symconst_kind symkind)
1588 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Builds a SymConst holding the address of entity `symbol`, placed in
   irg's start block.
   NOTE(review): the entity* is cast to type* to initialize the first
   union member of symconst_symbol — presumably the union's members share
   representation; confirm against the symconst_symbol declaration. */
1592 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp)
1594 symconst_symbol sym = {(type *)symbol};
1595 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1598 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
1599 symconst_symbol sym = {(type *)symbol};
1600 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1603 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1604 symconst_symbol sym = {symbol};
1605 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1608 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
1609 symconst_symbol sym = {symbol};
1610 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1614 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1617 ir_graph *rem = current_ir_graph;
1619 current_ir_graph = irg;
1620 res = new_bd_Sync(db, block, arity, in);
1621 current_ir_graph = rem;
1627 new_rd_Bad (ir_graph *irg)
1633 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1636 ir_graph *rem = current_ir_graph;
1638 current_ir_graph = irg;
1639 res = new_bd_Confirm(db, block, val, bound, cmp);
1640 current_ir_graph = rem;
1645 /* this function is often called with current_ir_graph unset */
1647 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1650 ir_graph *rem = current_ir_graph;
1652 current_ir_graph = irg;
1653 res = new_bd_Unknown(m);
1654 current_ir_graph = rem;
1660 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1663 ir_graph *rem = current_ir_graph;
1665 current_ir_graph = irg;
1666 res = new_bd_CallBegin(db, block, call);
1667 current_ir_graph = rem;
1673 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1677 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1679 IRN_VRFY_IRG(res, irg);
/* Constructs an EndExcept node (interprocedural view) and registers it
   as the graph's exceptional end node. */
1684 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1688 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1689 irg->end_except = res;
1690 IRN_VRFY_IRG (res, irg);
/* Constructs a Break node (interprocedural view). Delegates to
   new_bd_Break with current_ir_graph switched to irg. */
1695 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1698 ir_graph *rem = current_ir_graph;
1700 current_ir_graph = irg;
1701 res = new_bd_Break(db, block);
1702 current_ir_graph = rem;
/* Constructs a Filter node (interprocedural Proj variant) selecting
   result `proj` of `arg` with the given mode. */
1708 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1712 ir_graph *rem = current_ir_graph;
1714 current_ir_graph = irg;
1715 res = new_bd_Filter(db, block, arg, mode, proj);
1716 current_ir_graph = rem;
1722 new_rd_NoMem (ir_graph *irg) {
/* Constructs a Mux node: selects ir_true or ir_false depending on sel.
   Delegates to new_bd_Mux with current_ir_graph switched to irg. */
1727 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1728 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1731 ir_graph *rem = current_ir_graph;
1733 current_ir_graph = irg;
1734 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1735 current_ir_graph = rem;
/* Constructs a CopyB node copying an object of data_type from src to dst,
   threaded through memory state `store`. */
1740 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1741 ir_node *store, ir_node *dst, ir_node *src, type *data_type)
1744 ir_graph *rem = current_ir_graph;
1746 current_ir_graph = irg;
1747 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1748 current_ir_graph = rem;
/* -------------------------------------------------------------------- */
/* new_r_* family: thin wrappers around the corresponding new_rd_*      */
/* constructors, passing NULL debug info. Each delegates unchanged.     */
/* -------------------------------------------------------------------- */
1753 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1754 return new_rd_Block(NULL, irg, arity, in);
1756 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1757 return new_rd_Start(NULL, irg, block);
1759 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1760 return new_rd_End(NULL, irg, block);
1762 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1763 return new_rd_Jmp(NULL, irg, block);
1765 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1766 return new_rd_IJmp(NULL, irg, block, tgt);
1768 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1769 return new_rd_Cond(NULL, irg, block, c);
1771 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1772 ir_node *store, int arity, ir_node **in) {
1773 return new_rd_Return(NULL, irg, block, store, arity, in);
1775 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1776 ir_node *store, ir_node *obj) {
1777 return new_rd_Raise(NULL, irg, block, store, obj);
1779 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1780 ir_mode *mode, tarval *con) {
1781 return new_rd_Const(NULL, irg, block, mode, con);
1784 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1785 ir_mode *mode, long value) {
1786 return new_rd_Const_long(NULL, irg, block, mode, value);
1789 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1790 ir_mode *mode, tarval *con, type *tp) {
1791 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1794 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1795 symconst_symbol value, symconst_kind symkind) {
1796 return new_rd_SymConst(NULL, irg, block, value, symkind);
1798 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1799 ir_node *objptr, int n_index, ir_node **index,
1801 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1803 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1805 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
1807 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1808 ir_node *callee, int arity, ir_node **in,
1810 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1812 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1813 ir_node *op1, ir_node *op2, ir_mode *mode) {
1814 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1816 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1817 ir_node *op1, ir_node *op2, ir_mode *mode) {
1818 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1820 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1821 ir_node *op, ir_mode *mode) {
1822 return new_rd_Minus(NULL, irg, block, op, mode);
1824 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1825 ir_node *op1, ir_node *op2, ir_mode *mode) {
1826 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1828 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1829 ir_node *memop, ir_node *op1, ir_node *op2) {
1830 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1832 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1833 ir_node *memop, ir_node *op1, ir_node *op2) {
1834 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1836 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1837 ir_node *memop, ir_node *op1, ir_node *op2) {
1838 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1840 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1841 ir_node *memop, ir_node *op1, ir_node *op2) {
1842 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1844 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1845 ir_node *op, ir_mode *mode) {
1846 return new_rd_Abs(NULL, irg, block, op, mode);
1848 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1849 ir_node *op1, ir_node *op2, ir_mode *mode) {
1850 return new_rd_And(NULL, irg, block, op1, op2, mode);
1852 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1853 ir_node *op1, ir_node *op2, ir_mode *mode) {
1854 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1856 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1857 ir_node *op1, ir_node *op2, ir_mode *mode) {
1858 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1860 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1861 ir_node *op, ir_mode *mode) {
1862 return new_rd_Not(NULL, irg, block, op, mode);
1864 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1865 ir_node *op1, ir_node *op2) {
1866 return new_rd_Cmp(NULL, irg, block, op1, op2);
1868 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1869 ir_node *op, ir_node *k, ir_mode *mode) {
1870 return new_rd_Shl(NULL, irg, block, op, k, mode);
1872 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1873 ir_node *op, ir_node *k, ir_mode *mode) {
1874 return new_rd_Shr(NULL, irg, block, op, k, mode);
1876 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1877 ir_node *op, ir_node *k, ir_mode *mode) {
1878 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1880 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1881 ir_node *op, ir_node *k, ir_mode *mode) {
1882 return new_rd_Rot(NULL, irg, block, op, k, mode);
1884 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1885 ir_node *op, ir_mode *mode) {
1886 return new_rd_Conv(NULL, irg, block, op, mode);
1888 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1889 return new_rd_Cast(NULL, irg, block, op, to_tp);
1891 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1892 ir_node **in, ir_mode *mode) {
1893 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1895 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1896 ir_node *store, ir_node *adr, ir_mode *mode) {
1897 return new_rd_Load(NULL, irg, block, store, adr, mode);
1899 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1900 ir_node *store, ir_node *adr, ir_node *val) {
1901 return new_rd_Store(NULL, irg, block, store, adr, val);
1903 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1904 ir_node *size, type *alloc_type, where_alloc where) {
1905 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1907 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1908 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1909 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1911 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1912 return new_rd_Sync(NULL, irg, block, arity, in);
1914 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1915 ir_mode *mode, long proj) {
1916 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1918 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1920 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1922 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1923 int arity, ir_node **in) {
1924 return new_rd_Tuple(NULL, irg, block, arity, in );
1926 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1927 ir_node *val, ir_mode *mode) {
1928 return new_rd_Id(NULL, irg, block, val, mode);
1930 ir_node *new_r_Bad (ir_graph *irg) {
1931 return new_rd_Bad(irg);
1933 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1934 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1936 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1937 return new_rd_Unknown(irg, m);
1939 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1940 return new_rd_CallBegin(NULL, irg, block, callee);
1942 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1943 return new_rd_EndReg(NULL, irg, block);
1945 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1946 return new_rd_EndExcept(NULL, irg, block);
1948 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1949 return new_rd_Break(NULL, irg, block);
1951 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1952 ir_mode *mode, long proj) {
1953 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1955 ir_node *new_r_NoMem (ir_graph *irg) {
1956 return new_rd_NoMem(irg);
1958 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1959 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1960 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1963 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1964 ir_node *store, ir_node *dst, ir_node *src, type *data_type) {
1965 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1968 /** ********************/
1969 /** public interfaces */
1970 /** construction tools */
1974 * - create a new Start node in the current block
1976 * @return s - pointer to the created Start node
/* Constructs a Start node in the current block of current_ir_graph,
   then runs local optimization and verification on it. */
1981 new_d_Start (dbg_info *db)
1985 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1986 op_Start, mode_T, 0, NULL);
1987 /* res->attr.start.irg = current_ir_graph; */
1989 res = optimize_node(res);
1990 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node (dynamic arity, mode_X) in the current block of
   current_ir_graph, then optimizes and verifies it. */
1995 new_d_End (dbg_info *db)
1998 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1999 op_End, mode_X, -1, NULL);
2000 res = optimize_node(res);
2001 IRN_VRFY_IRG(res, current_ir_graph);
2006 /* Constructs a Block with a fixed number of predecessors.
2007 Does set current_block. Can be used with automatic Phi
2008 node construction. */
/* Constructs a Block with a fixed number of predecessors and makes it the
   current block. While the graph is being built (phase_building) it also
   allocates the per-block graph_arr used by the automatic Phi construction.
   Blocks with Unknown predecessors are not optimized, since optimization
   could merge away the placeholder edges. */
2010 new_d_Block (dbg_info *db, int arity, ir_node **in)
2014 bool has_unknown = false;
2016 res = new_bd_Block(db, arity, in);
2018 /* Create and initialize array for Phi-node construction. */
2019 if (get_irg_phase_state(current_ir_graph) == phase_building) {
2020 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2021 current_ir_graph->n_loc);
2022 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2025 for (i = arity-1; i >= 0; i--)
2026 if (get_irn_op(in[i]) == op_Unknown) {
2031 if (!has_unknown) res = optimize_node(res);
2032 current_ir_graph->current_block = res;
2034 IRN_VRFY_IRG(res, current_ir_graph);
2039 /* ***********************************************************************/
2040 /* Methods necessary for automatic Phi node creation */
2042 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2043 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2044 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2045 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2047 Call Graph: ( A ---> B == A "calls" B)
2049 get_value mature_immBlock
2057 get_r_value_internal |
2061 new_rd_Phi0 new_rd_Phi_in
2063 * *************************************************************************** */
2065 /** Creates a Phi node with 0 predecessors.
     Used for values whose definition is not yet known because the
     containing block is still immature; completed by mature_immBlock. */
2066 static INLINE ir_node *
2067 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2071 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2072 IRN_VRFY_IRG(res, irg);
2076 /* There are two implementations of the Phi node construction. The first
2077 is faster, but does not work for blocks with more than 2 predecessors.
2078 The second works always but is slower and causes more unnecessary Phi
2080 Select the implementations by the following preprocessor flag set in
2082 #if USE_FAST_PHI_CONSTRUCTION
2084 /* This is a stack used for allocating and deallocating nodes in
2085 new_rd_Phi_in. The original implementation used the obstack
2086 to model this stack, now it is explicit. This reduces side effects.
2088 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack (heap-allocated struct with a
   flexible array for the node pointers). */
2090 new_Phi_in_stack(void) {
2093 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2095 res->stack = NEW_ARR_F (ir_node *, 0);
/* Frees the stack's node array.
   NOTE(review): no free(s) is visible here for the struct allocated with
   malloc in new_Phi_in_stack — verify against the full source whether the
   struct itself is released. */
2102 free_Phi_in_stack(Phi_in_stack *s) {
2103 DEL_ARR_F(s->stack);
/* Returns a Phi node to the reuse stack: appends if the stack is full up
   to pos, otherwise overwrites the slot at pos, then bumps pos. */
2107 free_to_Phi_in_stack(ir_node *phi) {
2108 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2109 current_ir_graph->Phi_in_stack->pos)
2110 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2112 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2114 (current_ir_graph->Phi_in_stack->pos)++;
/* Either pops a previously freed Phi node from the reuse stack and
   re-initializes it for the given arity/in array, or allocates a fresh
   Phi node when the stack is empty.
   NOTE(review): `db` on the new_ir_node call is not declared in the
   visible parameter list — confirm against the full source. */
2117 static INLINE ir_node *
2118 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2119 int arity, ir_node **in) {
2121 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2122 int pos = current_ir_graph->Phi_in_stack->pos;
2126 /* We need to allocate a new node */
2127 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2128 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2130 /* reuse the old node and initialize it again. */
2133 assert (res->kind == k_ir_node);
2134 assert (res->op == op_Phi);
2138 assert (arity >= 0);
2139 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2140 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2142 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2144 (current_ir_graph->Phi_in_stack->pos)--;
2148 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2150 /* Creates a Phi node with a given, fixed array **in of predecessors.
2151 If the Phi node is unnecessary, as the same value reaches the block
2152 through all control flow paths, it is eliminated and the value
2153 returned directly. This constructor is only intended for use in
2154 the automatic Phi node generation triggered by get_value or mature.
2155 The implementation is quite tricky and depends on the fact, that
2156 the nodes are allocated on a stack:
2157 The in array contains predecessors and NULLs. The NULLs appear,
2158 if get_r_value_internal, that computed the predecessors, reached
2159 the same block on two paths. In this case the same value reaches
2160 this block on both paths, there is no definition in between. We need
2161 not allocate a Phi where these paths merge, but we have to communicate
2162 this fact to the caller. This happens by returning a pointer to the
2163 node the caller _will_ allocate. (Yes, we predict the address. We can
2164 do so because the nodes are allocated on the obstack.) The caller then
2165 finds a pointer to itself and, when this routine is called again,
/* Fast-variant Phi constructor (see the long comment above): builds a Phi
   from the given in-array; if at most one real predecessor remains, the
   speculative node is released again (back to the reuse stack, or freed
   from the obstack) and the single known value is returned instead. */
2168 static INLINE ir_node *
2169 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2172 ir_node *res, *known;
2174 /* Allocate a new node on the obstack. This can return a node to
2175 which some of the pointers in the in-array already point.
2176 Attention: the constructor copies the in array, i.e., the later
2177 changes to the array in this routine do not affect the
2178 constructed node! If the in array contains NULLs, there will be
2179 missing predecessors in the returned node. Is this a possible
2180 internal state of the Phi node generation? */
2181 #if USE_EXPLICIT_PHI_IN_STACK
2182 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2184 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2185 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2188 /* The in-array can contain NULLs. These were returned by
2189 get_r_value_internal if it reached the same block/definition on a
2190 second path. The NULLs are replaced by the node itself to
2191 simplify the test in the next loop. */
2192 for (i = 0; i < ins; ++i) {
2197 /* This loop checks whether the Phi has more than one predecessor.
2198 If so, it is a real Phi node and we break the loop. Else the Phi
2199 node merges the same definition on several paths and therefore is
2201 for (i = 0; i < ins; ++i) {
2202 if (in[i] == res || in[i] == known)
2211 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2213 #if USE_EXPLICIT_PHI_IN_STACK
2214 free_to_Phi_in_stack(res);
2216 edges_node_deleted(res, current_ir_graph);
2217 obstack_free(current_ir_graph->obst, res);
2221 res = optimize_node (res);
2222 IRN_VRFY_IRG(res, irg);
2225 /* return the pointer to the Phi node. This node might be deallocated! */
2230 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2233 allocates and returns this node. The routine called to allocate the
2234 node might optimize it away and return a real value, or even a pointer
2235 to a deallocated Phi node on top of the obstack!
2236 This function is called with an in-array of proper size. **/
/* Fast-variant phi_merge: collects, for each control-flow predecessor of
   `block`, the reaching value of local `pos` (via get_r_value_internal)
   into nin, then builds a Phi (or returns the single operand) and records
   the result in the block's graph_arr unless a value is already present. */
2238 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2240 ir_node *prevBlock, *res;
2243 /* This loop goes to all predecessor blocks of the block the Phi node is in
2244 and there finds the operands of the Phi node by calling
2245 get_r_value_internal. */
2246 for (i = 1; i <= ins; ++i) {
2247 assert (block->in[i]);
2248 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2250 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2253 /* After collecting all predecessors into the array nin a new Phi node
2254 with these predecessors is created. This constructor contains an
2255 optimization: If all predecessors of the Phi node are identical it
2256 returns the only operand instead of a new Phi node. If the value
2257 passes two different control flow edges without being defined, and
2258 this is the second path treated, a pointer to the node that will be
2259 allocated for the first path (recursion) is returned. We already
2260 know the address of this node, as it is the next node to be allocated
2261 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2262 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2264 /* Now we know the value for "pos" and can enter it in the array with
2265 all known local variables. Attention: this might be a pointer to
2266 a node, that later will be allocated!!! See new_rd_Phi_in.
2267 If this is called in mature, after some set_value in the same block,
2268 the proper value must not be overwritten:
2270 get_value (makes Phi0, puts it into graph_arr)
2271 set_value (overwrites Phi0 in graph_arr)
2272 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2275 if (!block->attr.block.graph_arr[pos]) {
2276 block->attr.block.graph_arr[pos] = res;
2278 /* printf(" value already computed by %s\n",
2279 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2285 /* This function returns the last definition of a variable. In case
2286 this variable was last defined in a previous block, Phi nodes are
2287 inserted. If the part of the firm graph containing the definition
2288 is not yet constructed, a dummy Phi node is returned. */
/* Fast-variant get_r_value_internal: returns the last definition of local
   `pos` reaching `block`, creating Phi/Phi0 nodes as described in the
   case analysis below. */
2290 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2293 /* There are 4 cases to treat.
2295 1. The block is not mature and we visit it the first time. We can not
2296 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2297 predecessors is returned. This node is added to the linked list (field
2298 "link") of the containing block to be completed when this block is
2299 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2302 2. The value is already known in this block, graph_arr[pos] is set and we
2303 visit the block the first time. We can return the value without
2304 creating any new nodes.
2306 3. The block is mature and we visit it the first time. A Phi node needs
2307 to be created (phi_merge). If the Phi is not needed, as all its
2308 operands are the same value reaching the block through different
2309 paths, it's optimized away and the value itself is returned.
2311 4. The block is mature, and we visit it the second time. Now two
2312 subcases are possible:
2313 * The value was computed completely the last time we were here. This
2314 is the case if there is no loop. We can return the proper value.
2315 * The recursion that visited this node and set the flag did not
2316 return yet. We are computing a value in a loop and need to
2317 break the recursion without knowing the result yet.
2318 @@@ strange case. Straight forward we would create a Phi before
2319 starting the computation of its predecessors. In this case we will
2320 find a Phi here in any case. The problem is that this implementation
2321 only creates a Phi after computing the predecessors, so that it is
2322 hard to compute self references of this Phi. @@@
2323 There is no simple check for the second subcase. Therefore we check
2324 for a second visit and treat all such cases as the second subcase.
2325 Anyways, the basic situation is the same: we reached a block
2326 on two paths without finding a definition of the value: No Phi
2327 nodes are needed on both paths.
2328 We return this information "Two paths, no Phi needed" by a very tricky
2329 implementation that relies on the fact that an obstack is a stack and
2330 will return a node with the same address on different allocations.
2331 Look also at phi_merge and new_rd_phi_in to understand this.
2332 @@@ Unfortunately this does not work, see testprogram
2333 three_cfpred_example.
2337 /* case 4 -- already visited. */
2338 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2340 /* visited the first time */
2341 set_irn_visited(block, get_irg_visited(current_ir_graph));
2343 /* Get the local valid value */
2344 res = block->attr.block.graph_arr[pos];
2346 /* case 2 -- If the value is actually computed, return it. */
2347 if (res) return res;
2349 if (block->attr.block.matured) { /* case 3 */
2351 /* The Phi has the same amount of ins as the corresponding block. */
2352 int ins = get_irn_arity(block);
2354 NEW_ARR_A (ir_node *, nin, ins);
2356 /* Phi merge collects the predecessors and then creates a node. */
2357 res = phi_merge (block, pos, mode, nin, ins);
2359 } else { /* case 1 */
2360 /* The block is not mature, we don't know how many in's are needed. A Phi
2361 with zero predecessors is created. Such a Phi node is called Phi0
2362 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2363 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2365 The Phi0 has to remember the pos of its internal value. If the real
2366 Phi is computed, pos is used to update the array with the local
2369 res = new_rd_Phi0 (current_ir_graph, block, mode);
2370 res->attr.phi0_pos = pos;
2371 res->link = block->link;
2375 /* If we get here, the frontend missed a use-before-definition error */
2378 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2379 assert (mode->code >= irm_F && mode->code <= irm_P);
2380 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2381 tarval_mode_null[mode->code]);
2384 /* The local valid value is available now. */
2385 block->attr.block.graph_arr[pos] = res;
2393 it starts the recursion. This causes an Id at the entry of
2394 every block that has no definition of the value! **/
2396 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy implementations: in this (general) configuration the explicit
   Phi_in stack is unused, so the API degenerates to no-ops. */
2398 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2399 void free_Phi_in_stack(Phi_in_stack *s) { }
/* General-variant Phi constructor: builds a Phi from the in-array,
   skipping Ids and Bad predecessors and resolving references to the
   pending Phi0 `phi0`. If at most one real predecessor remains, the
   speculative node is freed from the obstack and the single known value
   (possibly re-optimized) is returned instead. Memory Phis are added to
   the End keep-alives so endless loops do not lose their memory state. */
2402 static INLINE ir_node *
2403 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2404 ir_node **in, int ins, ir_node *phi0)
2407 ir_node *res, *known;
2409 /* Allocate a new node on the obstack. The allocation copies the in
2411 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2412 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2414 /* This loop checks whether the Phi has more than one predecessor.
2415 If so, it is a real Phi node and we break the loop. Else the
2416 Phi node merges the same definition on several paths and therefore
2417 is not needed. Don't consider Bad nodes! */
2419 for (i=0; i < ins; ++i)
2423 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2425 /* Optimize self referencing Phis: We can't detect them yet properly, as
2426 they still refer to the Phi0 they will replace. So replace right now. */
2427 if (phi0 && in[i] == phi0) in[i] = res;
2429 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2437 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2440 edges_node_deleted(res, current_ir_graph);
2441 obstack_free (current_ir_graph->obst, res);
2442 if (is_Phi(known)) {
2443 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2444 order, an enclosing Phi node may become superfluous. */
2445 res = optimize_in_place_2(known);
2447 exchange(known, res);
2453 /* An undefined value, e.g., in unreachable code. */
2457 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2458 IRN_VRFY_IRG(res, irg);
2459 /* Memory Phis in endless loops must be kept alive.
2460 As we can't distinguish these easily we keep all of them alive. */
2461 if ((res->op == op_Phi) && (mode == mode_M))
2462 add_End_keepalive(irg->end, res);
2469 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2471 #if PRECISE_EXC_CONTEXT
2473 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2475 /* Construct a new frag_array for node n.
2476 Copy the content from the current graph_arr of the corresponding block:
2477 this is the current state.
2478 Set ProjM(n) as current memory state.
2479 Further the last entry in frag_arr of current block points to n. This
2480 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Constructs a new frag_arr for fragile op n: copies the current block's
   graph_arr, installs ProjM(n) as the current memory state (optimization
   is temporarily disabled so the Proj is not folded away), and links n
   into the block's chain of fragile ops via the last graph_arr slot. */
2482 static INLINE ir_node ** new_frag_arr (ir_node *n)
2487 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2488 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2489 sizeof(ir_node *)*current_ir_graph->n_loc);
2491 /* turn off optimization before allocating Proj nodes, as res isn't
2493 opt = get_opt_optimize(); set_optimize(0);
2494 /* Here we rely on the fact that all frag ops have Memory as first result! */
2495 if (get_irn_op(n) == op_Call)
2496 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2498 assert((pn_Quot_M == pn_DivMod_M) &&
2499 (pn_Quot_M == pn_Div_M) &&
2500 (pn_Quot_M == pn_Mod_M) &&
2501 (pn_Quot_M == pn_Load_M) &&
2502 (pn_Quot_M == pn_Store_M) &&
2503 (pn_Quot_M == pn_Alloc_M) );
2504 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2508 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2513 * returns the frag_arr from a node
/* Dispatches on the opcode to reach the frag_arr stored in the
   opcode-specific attribute union member. */
2515 static INLINE ir_node **
2516 get_frag_arr (ir_node *n) {
2517 switch (get_irn_opcode(n)) {
2519 return n->attr.call.exc.frag_arr;
2521 return n->attr.a.exc.frag_arr;
2523 return n->attr.load.exc.frag_arr;
2525 return n->attr.store.exc.frag_arr;
2527 return n->attr.except.frag_arr;
/* Writes val into slot pos of a frag_arr (only if still unset) and
   propagates the value along the chain of fragile ops linked through the
   last array slot.
   NOTE(review): this listing shows both a recursive (lines 2534-2538)
   and an iterative, bounded (lines 2543-2554) variant interleaved —
   verify against the full source which one is active. */
2532 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2534 if (!frag_arr[pos]) frag_arr[pos] = val;
2535 if (frag_arr[current_ir_graph->n_loc - 1]) {
2536 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2537 assert(arr != frag_arr && "Endless recursion detected");
2538 set_frag_value(arr, pos, val);
2543 for (i = 0; i < 1000; ++i) {
2544 if (!frag_arr[pos]) {
2545 frag_arr[pos] = val;
2547 if (frag_arr[current_ir_graph->n_loc - 1]) {
2548 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2554 assert(0 && "potential endless recursion");
/* Returns the value of local `pos` as seen right after fragile op cfOp:
   reads it from cfOp's frag_arr, and where the block has a newer
   set_value, builds the required Phi (phi_merge / Phi0 depending on
   block maturity) and records it back via set_frag_value. */
2559 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2563 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2565 frag_arr = get_frag_arr(cfOp);
2566 res = frag_arr[pos];
2568 if (block->attr.block.graph_arr[pos]) {
2569 /* There was a set_value after the cfOp and no get_value before that
2570 set_value. We must build a Phi node now. */
2571 if (block->attr.block.matured) {
2572 int ins = get_irn_arity(block);
2574 NEW_ARR_A (ir_node *, nin, ins);
2575 res = phi_merge(block, pos, mode, nin, ins);
2577 res = new_rd_Phi0 (current_ir_graph, block, mode);
2578 res->attr.phi0_pos = pos;
2579 res->link = block->link;
2583 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2584 but this should be better: (remove comment if this works) */
2585 /* It's a Phi, we can write this into all graph_arrs with NULL */
2586 set_frag_value(block->attr.block.graph_arr, pos, res);
2588 res = get_r_value_internal(block, pos, mode);
2589 set_frag_value(block->attr.block.graph_arr, pos, res);
2597 computes the predecessors for the real phi node, and then
2598 allocates and returns this node. The routine called to allocate the
2599 node might optimize it away and return a real value.
2600 This function must be called with an in-array of proper size. **/
/* General-variant phi_merge: pre-registers a Phi0 (to break recursion),
   collects the value of local `pos` from each predecessor block — using
   the frag_arr path for fragile predecessors under precise exception
   context — then builds the real Phi and, if a Phi0 was created here,
   exchanges it for the result. */
2602 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2604 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2607 /* If this block has no value at pos create a Phi0 and remember it
2608 in graph_arr to break recursions.
2609 Else we may not set graph_arr as a later value is remembered there. */
2611 if (!block->attr.block.graph_arr[pos]) {
2612 if (block == get_irg_start_block(current_ir_graph)) {
2613 /* Collapsing to Bad tarvals is no good idea.
2614 So we call a user-supplied routine here that deals with this case as
2615 appropriate for the given language. Sadly the only help we can give
2616 here is the position.
2618 Even if all variables are defined before use, it can happen that
2619 we get to the start block, if a Cond has been replaced by a tuple
2620 (bad, jmp). In this case we call the function needlessly, eventually
2621 generating a non-existent error.
2622 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2625 if (default_initialize_local_variable) {
2626 ir_node *rem = get_cur_block();
2628 set_cur_block(block);
2629 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2633 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2634 /* We don't need to care about exception ops in the start block.
2635 There are none by definition. */
2636 return block->attr.block.graph_arr[pos];
2638 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2639 block->attr.block.graph_arr[pos] = phi0;
2640 #if PRECISE_EXC_CONTEXT
2641 if (get_opt_precise_exc_context()) {
2642 /* Set graph_arr for fragile ops. Also here we should break recursion.
2643 We could choose a cyclic path through a cfop. But the recursion would
2644 break at some point. */
2645 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2651 /* This loop goes to all predecessor blocks of the block the Phi node
2652 is in and there finds the operands of the Phi node by calling
2653 get_r_value_internal. */
2654 for (i = 1; i <= ins; ++i) {
2655 prevCfOp = skip_Proj(block->in[i]);
2657 if (is_Bad(prevCfOp)) {
2658 /* In case a Cond has been optimized we would get right to the start block
2659 with an invalid definition. */
2660 nin[i-1] = new_Bad();
2663 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2665 if (!is_Bad(prevBlock)) {
2666 #if PRECISE_EXC_CONTEXT
2667 if (get_opt_precise_exc_context() &&
2668 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2669 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2670 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2673 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2675 nin[i-1] = new_Bad();
2679 /* We want to pass the Phi0 node to the constructor: this finds additional
2680 optimization possibilities.
2681 The Phi0 node either is allocated in this function, or it comes from
2682 a former call to get_r_value_internal. In this case we may not yet
2683 exchange phi0, as this is done in mature_immBlock. */
2685 phi0_all = block->attr.block.graph_arr[pos];
2686 if (!((get_irn_op(phi0_all) == op_Phi) &&
2687 (get_irn_arity(phi0_all) == 0) &&
2688 (get_nodes_block(phi0_all) == block)))
2694 /* After collecting all predecessors into the array nin a new Phi node
2695 with these predecessors is created. This constructor contains an
2696 optimization: If all predecessors of the Phi node are identical it
2697 returns the only operand instead of a new Phi node. */
2698 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2700 /* In case we allocated a Phi0 node at the beginning of this procedure,
2701 we need to exchange this Phi0 with the real Phi. */
2703 exchange(phi0, res);
2704 block->attr.block.graph_arr[pos] = res;
2705 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2706 only an optimization. */
2712 /* This function returns the last definition of a variable. In case
2713 this variable was last defined in a previous block, Phi nodes are
2714 inserted. If the part of the firm graph containing the definition
2715 is not yet constructed, a dummy Phi node is returned. */
2717 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2720 /* There are 4 cases to treat.
2722 1. The block is not mature and we visit it the first time. We can not
2723 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2724 predecessors is returned. This node is added to the linked list (field
2725 "link") of the containing block to be completed when this block is
2726 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2729 2. The value is already known in this block, graph_arr[pos] is set and we
2730 visit the block the first time. We can return the value without
2731 creating any new nodes.
2733 3. The block is mature and we visit it the first time. A Phi node needs
2734 to be created (phi_merge). If the Phi is not needed, as all it's
2735 operands are the same value reaching the block through different
2736 paths, it's optimized away and the value itself is returned.
2738 4. The block is mature, and we visit it the second time. Now two
2739 subcases are possible:
2740 * The value was computed completely the last time we were here. This
2741 is the case if there is no loop. We can return the proper value.
2742 * The recursion that visited this node and set the flag did not
2743 return yet. We are computing a value in a loop and need to
2744 break the recursion. This case only happens if we visited
2745 the same block with phi_merge before, which inserted a Phi0.
2746 So we return the Phi0.
2749 /* case 4 -- already visited. */
2750 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2751 /* As phi_merge allocates a Phi0 this value is always defined. Here
2752 is the critical difference of the two algorithms. */
2753 assert(block->attr.block.graph_arr[pos]);
2754 return block->attr.block.graph_arr[pos];
2757 /* visited the first time */
2758 set_irn_visited(block, get_irg_visited(current_ir_graph));
2760 /* Get the local valid value */
2761 res = block->attr.block.graph_arr[pos];
2763 /* case 2 -- If the value is actually computed, return it. */
2764 if (res) { return res; };
2766 if (block->attr.block.matured) { /* case 3 */
2768 /* The Phi has the same amount of ins as the corresponding block. */
2769 int ins = get_irn_arity(block);
2771 NEW_ARR_A (ir_node *, nin, ins);
2773 /* Phi merge collects the predecessors and then creates a node. */
2774 res = phi_merge (block, pos, mode, nin, ins);
2776 } else { /* case 1 */
2777 /* The block is not mature, we don't know how many in's are needed. A Phi
2778 with zero predecessors is created. Such a Phi node is called Phi0
2779 node. The Phi0 is then added to the list of Phi0 nodes in this block
2780 to be matured by mature_immBlock later.
2781 The Phi0 has to remember the pos of it's internal value. If the real
2782 Phi is computed, pos is used to update the array with the local
2784 res = new_rd_Phi0 (current_ir_graph, block, mode);
2785 res->attr.phi0_pos = pos;
2786 res->link = block->link;
2790 /* If we get here, the frontend missed a use-before-definition error */
2793 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2794 assert (mode->code >= irm_F && mode->code <= irm_P);
2795 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2796 get_mode_null(mode));
2799 /* The local valid value is available now. */
2800 block->attr.block.graph_arr[pos] = res;
2805 #endif /* USE_FAST_PHI_CONSTRUCTION */
2807 /* ************************************************************************** */
2810 * Finalize a Block node, when all control flows are known.
2811 * Acceptable parameters are only Block nodes.
2814 mature_immBlock (ir_node *block)
2820 assert (get_irn_opcode(block) == iro_Block);
2821 /* @@@ should be commented in
2822 assert (!get_Block_matured(block) && "Block already matured"); */
2824 if (!get_Block_matured(block)) {
2825 ins = ARR_LEN (block->in)-1;
2826 /* Fix block parameters */
2827 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2829 /* An array for building the Phi nodes. */
2830 NEW_ARR_A (ir_node *, nin, ins);
2832 /* Traverse a chain of Phi nodes attached to this block and mature
2834 for (n = block->link; n; n=next) {
2835 inc_irg_visited(current_ir_graph);
2837 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2840 block->attr.block.matured = 1;
2842 /* Now, as the block is a finished firm node, we can optimize it.
2843 Since other nodes have been allocated since the block was created
2844 we can not free the node on the obstack. Therefore we have to call
2846 Unfortunately the optimization does not change a lot, as all allocated
2847 nodes refer to the unoptimized node.
2848 We can call _2, as global cse has no effect on blocks. */
2849 block = optimize_in_place_2(block);
2850 IRN_VRFY_IRG(block, current_ir_graph);
2855 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2857 return new_bd_Phi(db, current_ir_graph->current_block,
2862 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2864 return new_bd_Const(db, current_ir_graph->start_block,
2869 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2871 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2875 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, type *tp)
2877 return new_bd_Const_type(db, current_ir_graph->start_block,
2883 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2885 return new_bd_Id(db, current_ir_graph->current_block,
2890 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2892 return new_bd_Proj(db, current_ir_graph->current_block,
2897 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2900 assert(arg->op == op_Cond);
2901 arg->attr.c.kind = fragmentary;
2902 arg->attr.c.default_proj = max_proj;
2903 res = new_Proj (arg, mode_X, max_proj);
2908 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2910 return new_bd_Conv(db, current_ir_graph->current_block,
2915 new_d_Cast (dbg_info *db, ir_node *op, type *to_tp)
2917 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2921 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2923 return new_bd_Tuple(db, current_ir_graph->current_block,
2928 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2930 return new_bd_Add(db, current_ir_graph->current_block,
2935 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2937 return new_bd_Sub(db, current_ir_graph->current_block,
2943 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2945 return new_bd_Minus(db, current_ir_graph->current_block,
2950 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2952 return new_bd_Mul(db, current_ir_graph->current_block,
2957 * allocate the frag array
2959 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2960 if (get_opt_precise_exc_context()) {
2961 if ((current_ir_graph->phase_state == phase_building) &&
2962 (get_irn_op(res) == op) && /* Could be optimized away. */
2963 !*frag_store) /* Could be a cse where the arr is already set. */ {
2964 *frag_store = new_frag_arr(res);
2971 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2974 res = new_bd_Quot (db, current_ir_graph->current_block,
2976 res->attr.except.pin_state = op_pin_state_pinned;
2977 #if PRECISE_EXC_CONTEXT
2978 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2985 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2988 res = new_bd_DivMod (db, current_ir_graph->current_block,
2990 res->attr.except.pin_state = op_pin_state_pinned;
2991 #if PRECISE_EXC_CONTEXT
2992 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2999 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3002 res = new_bd_Div (db, current_ir_graph->current_block,
3004 res->attr.except.pin_state = op_pin_state_pinned;
3005 #if PRECISE_EXC_CONTEXT
3006 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
3013 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3016 res = new_bd_Mod (db, current_ir_graph->current_block,
3018 res->attr.except.pin_state = op_pin_state_pinned;
3019 #if PRECISE_EXC_CONTEXT
3020 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
3027 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3029 return new_bd_And (db, current_ir_graph->current_block,
3034 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3036 return new_bd_Or (db, current_ir_graph->current_block,
3041 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3043 return new_bd_Eor (db, current_ir_graph->current_block,
3048 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
3050 return new_bd_Not (db, current_ir_graph->current_block,
3055 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3057 return new_bd_Shl (db, current_ir_graph->current_block,
3062 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3064 return new_bd_Shr (db, current_ir_graph->current_block,
3069 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3071 return new_bd_Shrs (db, current_ir_graph->current_block,
3076 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3078 return new_bd_Rot (db, current_ir_graph->current_block,
3083 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3085 return new_bd_Abs (db, current_ir_graph->current_block,
3090 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3092 return new_bd_Cmp (db, current_ir_graph->current_block,
3097 new_d_Jmp (dbg_info *db)
3099 return new_bd_Jmp (db, current_ir_graph->current_block);
3103 new_d_IJmp (dbg_info *db, ir_node *tgt)
3105 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3109 new_d_Cond (dbg_info *db, ir_node *c)
3111 return new_bd_Cond (db, current_ir_graph->current_block, c);
3115 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3119 res = new_bd_Call (db, current_ir_graph->current_block,
3120 store, callee, arity, in, tp);
3121 #if PRECISE_EXC_CONTEXT
3122 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3129 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3131 return new_bd_Return (db, current_ir_graph->current_block,
3136 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3138 return new_bd_Raise (db, current_ir_graph->current_block,
3143 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3146 res = new_bd_Load (db, current_ir_graph->current_block,
3148 #if PRECISE_EXC_CONTEXT
3149 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3156 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3159 res = new_bd_Store (db, current_ir_graph->current_block,
3161 #if PRECISE_EXC_CONTEXT
3162 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3169 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, type *alloc_type,
3173 res = new_bd_Alloc (db, current_ir_graph->current_block,
3174 store, size, alloc_type, where);
3175 #if PRECISE_EXC_CONTEXT
3176 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3183 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3184 ir_node *size, type *free_type, where_alloc where)
3186 return new_bd_Free (db, current_ir_graph->current_block,
3187 store, ptr, size, free_type, where);
3191 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3192 /* GL: objptr was called frame before. Frame was a bad choice for the name
3193 as the operand could as well be a pointer to a dynamic object. */
3195 return new_bd_Sel (db, current_ir_graph->current_block,
3196 store, objptr, 0, NULL, ent);
3200 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3202 return new_bd_Sel (db, current_ir_graph->current_block,
3203 store, objptr, n_index, index, sel);
3207 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
3209 return (new_bd_InstOf (db, current_ir_graph->current_block,
3210 store, objptr, ent));
3214 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, type *tp)
3216 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3221 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3223 return new_bd_SymConst (db, current_ir_graph->start_block,
3228 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3230 return new_bd_Sync (db, current_ir_graph->current_block,
3237 return _new_d_Bad();
3241 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3243 return new_bd_Confirm (db, current_ir_graph->current_block,
3248 new_d_Unknown (ir_mode *m)
3250 return new_bd_Unknown(m);
3254 new_d_CallBegin (dbg_info *db, ir_node *call)
3257 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3262 new_d_EndReg (dbg_info *db)
3265 res = new_bd_EndReg(db, current_ir_graph->current_block);
3270 new_d_EndExcept (dbg_info *db)
3273 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3278 new_d_Break (dbg_info *db)
3280 return new_bd_Break (db, current_ir_graph->current_block);
3284 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3286 return new_bd_Filter (db, current_ir_graph->current_block,
3293 return _new_d_NoMem();
3297 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3298 ir_node *ir_true, ir_mode *mode) {
3299 return new_bd_Mux (db, current_ir_graph->current_block,
3300 sel, ir_false, ir_true, mode);
3303 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
3304 ir_node *dst, ir_node *src, type *data_type) {
3306 res = new_bd_CopyB(db, current_ir_graph->current_block,
3307 store, dst, src, data_type);
3308 #if PRECISE_EXC_CONTEXT
3309 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
3314 /* ********************************************************************* */
3315 /* Comfortable interface with automatic Phi node construction. */
 3316 /* (Uses also constructors of ?? interface, except new_Block.)        */
3317 /* ********************************************************************* */
3319 /* Block construction */
3320 /* immature Block without predecessors */
3321 ir_node *new_d_immBlock (dbg_info *db) {
3324 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3325 /* creates a new dynamic in-array as length of in is -1 */
3326 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3327 current_ir_graph->current_block = res;
3328 res->attr.block.matured = 0;
3329 res->attr.block.dead = 0;
3330 /* res->attr.block.exc = exc_normal; */
3331 /* res->attr.block.handler_entry = 0; */
3332 res->attr.block.irg = current_ir_graph;
3333 res->attr.block.backedge = NULL;
3334 res->attr.block.in_cg = NULL;
3335 res->attr.block.cg_backedge = NULL;
3336 set_Block_block_visited(res, 0);
3338 /* Create and initialize array for Phi-node construction. */
3339 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3340 current_ir_graph->n_loc);
3341 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3343 /* Immature block may not be optimized! */
3344 IRN_VRFY_IRG(res, current_ir_graph);
3350 new_immBlock (void) {
3351 return new_d_immBlock(NULL);
3354 /* add an edge to a jmp/control flow node */
3356 add_immBlock_pred (ir_node *block, ir_node *jmp)
3358 if (block->attr.block.matured) {
3359 assert(0 && "Error: Block already matured!\n");
3362 assert(jmp != NULL);
3363 ARR_APP1(ir_node *, block->in, jmp);
3367 /* changing the current block */
3369 set_cur_block (ir_node *target) {
3370 current_ir_graph->current_block = target;
3373 /* ************************ */
3374 /* parameter administration */
3376 /* get a value from the parameter array from the current block by its index */
3378 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3380 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3381 inc_irg_visited(current_ir_graph);
3383 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3385 /* get a value from the parameter array from the current block by its index */
3387 get_value (int pos, ir_mode *mode)
3389 return get_d_value(NULL, pos, mode);
3392 /* set a value at position pos in the parameter array from the current block */
3394 set_value (int pos, ir_node *value)
3396 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3397 assert(pos+1 < current_ir_graph->n_loc);
3398 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3401 /* get the current store */
3405 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3406 /* GL: one could call get_value instead */
3407 inc_irg_visited(current_ir_graph);
3408 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3411 /* set the current store */
3413 set_store (ir_node *store)
3415 /* GL: one could call set_value instead */
3416 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3417 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3421 keep_alive (ir_node *ka) {
3422 add_End_keepalive(current_ir_graph->end, ka);
3425 /* --- Useful access routines --- */
3426 /* Returns the current block of the current graph. To set the current
3427 block use set_cur_block. */
3428 ir_node *get_cur_block(void) {
3429 return get_irg_current_block(current_ir_graph);
3432 /* Returns the frame type of the current graph */
3433 type *get_cur_frame_type(void) {
3434 return get_irg_frame_type(current_ir_graph);
3438 /* ********************************************************************* */
3441 /* call once for each run of the library */
3443 init_cons(uninitialized_local_variable_func_t *func)
3445 default_initialize_local_variable = func;
3448 /* call for each graph */
3450 irg_finalize_cons (ir_graph *irg) {
3451 irg->phase_state = phase_high;
3455 irp_finalize_cons (void) {
3456 int i, n_irgs = get_irp_n_irgs();
3457 for (i = 0; i < n_irgs; i++) {
3458 irg_finalize_cons(get_irp_irg(i));
3460 irp->phase_state = phase_high;\
3466 ir_node *new_Block(int arity, ir_node **in) {
3467 return new_d_Block(NULL, arity, in);
3469 ir_node *new_Start (void) {
3470 return new_d_Start(NULL);
3472 ir_node *new_End (void) {
3473 return new_d_End(NULL);
3475 ir_node *new_Jmp (void) {
3476 return new_d_Jmp(NULL);
3478 ir_node *new_IJmp (ir_node *tgt) {
3479 return new_d_IJmp(NULL, tgt);
3481 ir_node *new_Cond (ir_node *c) {
3482 return new_d_Cond(NULL, c);
3484 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3485 return new_d_Return(NULL, store, arity, in);
3487 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3488 return new_d_Raise(NULL, store, obj);
3490 ir_node *new_Const (ir_mode *mode, tarval *con) {
3491 return new_d_Const(NULL, mode, con);
3494 ir_node *new_Const_long(ir_mode *mode, long value)
3496 return new_d_Const_long(NULL, mode, value);
3499 ir_node *new_Const_type(tarval *con, type *tp) {
3500 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3503 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3504 return new_d_SymConst(NULL, value, kind);
3506 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3507 return new_d_simpleSel(NULL, store, objptr, ent);
3509 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3511 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3513 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
3514 return new_d_InstOf (NULL, store, objptr, ent);
3516 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3518 return new_d_Call(NULL, store, callee, arity, in, tp);
3520 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3521 return new_d_Add(NULL, op1, op2, mode);
3523 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3524 return new_d_Sub(NULL, op1, op2, mode);
3526 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3527 return new_d_Minus(NULL, op, mode);
3529 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3530 return new_d_Mul(NULL, op1, op2, mode);
3532 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3533 return new_d_Quot(NULL, memop, op1, op2);
3535 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3536 return new_d_DivMod(NULL, memop, op1, op2);
3538 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3539 return new_d_Div(NULL, memop, op1, op2);
3541 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3542 return new_d_Mod(NULL, memop, op1, op2);
3544 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3545 return new_d_Abs(NULL, op, mode);
3547 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3548 return new_d_And(NULL, op1, op2, mode);
3550 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3551 return new_d_Or(NULL, op1, op2, mode);
3553 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3554 return new_d_Eor(NULL, op1, op2, mode);
3556 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3557 return new_d_Not(NULL, op, mode);
3559 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3560 return new_d_Shl(NULL, op, k, mode);
3562 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3563 return new_d_Shr(NULL, op, k, mode);
3565 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3566 return new_d_Shrs(NULL, op, k, mode);
3568 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3569 return new_d_Rot(NULL, op, k, mode);
3571 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3572 return new_d_Cmp(NULL, op1, op2);
3574 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3575 return new_d_Conv(NULL, op, mode);
3577 ir_node *new_Cast (ir_node *op, type *to_tp) {
3578 return new_d_Cast(NULL, op, to_tp);
3580 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3581 return new_d_Phi(NULL, arity, in, mode);
3583 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3584 return new_d_Load(NULL, store, addr, mode);
3586 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3587 return new_d_Store(NULL, store, addr, val);
3589 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
3590 where_alloc where) {
3591 return new_d_Alloc(NULL, store, size, alloc_type, where);
3593 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3594 type *free_type, where_alloc where) {
3595 return new_d_Free(NULL, store, ptr, size, free_type, where);
3597 ir_node *new_Sync (int arity, ir_node **in) {
3598 return new_d_Sync(NULL, arity, in);
3600 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3601 return new_d_Proj(NULL, arg, mode, proj);
3603 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3604 return new_d_defaultProj(NULL, arg, max_proj);
3606 ir_node *new_Tuple (int arity, ir_node **in) {
3607 return new_d_Tuple(NULL, arity, in);
3609 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3610 return new_d_Id(NULL, val, mode);
3612 ir_node *new_Bad (void) {
3615 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3616 return new_d_Confirm (NULL, val, bound, cmp);
3618 ir_node *new_Unknown(ir_mode *m) {
3619 return new_d_Unknown(m);
3621 ir_node *new_CallBegin (ir_node *callee) {
3622 return new_d_CallBegin(NULL, callee);
3624 ir_node *new_EndReg (void) {
3625 return new_d_EndReg(NULL);
3627 ir_node *new_EndExcept (void) {
3628 return new_d_EndExcept(NULL);
3630 ir_node *new_Break (void) {
3631 return new_d_Break(NULL);
3633 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3634 return new_d_Filter(NULL, arg, mode, proj);
3636 ir_node *new_NoMem (void) {
3637 return new_d_NoMem();
3639 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3640 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3642 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, type *data_type) {
3643 return new_d_CopyB(NULL, store, dst, src, data_type);