3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45    Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
/* NOTE(review): the two IRN_VRFY_IRG definitions below are the verify-off and
   verify-on variants; the #ifdef/#else separating them is not visible in this
   excerpt -- confirm against the full file before editing. */
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Frontend-installed hook used to produce a value for a local variable that is
   read before it is written; NULL until the frontend registers one. */
61  * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* "bd" constructors: build a node with explicit debug info in the graph held
   in the global current_ir_graph. They do NOT switch graphs themselves; the
   new_rd_* wrappers below do that. (This excerpt is line-sampled; result
   declarations, braces and returns are omitted from view.) */
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Block is created already matured: all predecessors are given up front. */
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T tuple of initial values (memory, args, ...). */
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
/* End node: created with dynamic arity (-1); keepalives are added later. */
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
127 /* Don't assert that block matured: the use of this constructor is strongly
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; a Phi with Unknown inputs must not
   be optimized away (the flag set inside this loop is used below). */
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146 As we can't distinguish these easily we keep all of them alive. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
/* Constant and projection constructors. Note: Const nodes are always placed
   in the graph's start block (see use of irg->start_block below); the `block`
   parameter is accepted for interface uniformity but not used for placement. */
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp); /* Call method because of complex assertion. */
161 res = optimize_node (res);
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
/* Convenience variants; both delegate to the rd constructors (which re-enter
   via current_ir_graph), supplying firm_unknown_type / a tarval from a long. */
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Id node: single-input identity, normally folded away by optimize_node. */
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
/* Proj: selects one result out of a mode_T predecessor. */
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond as `fragmentary` and records the
   default projection number before building a plain X-mode Proj. */
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
/* Cast: changes the type tag only; the mode is taken from the operand. */
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
/* Arithmetic / logic / shift / compare constructors. All follow one pattern:
   gather operands into `in[]` (assignments not visible in this sampled
   excerpt), create the node, run local optimization, verify. The three-input
   Quot/DivMod/Div/Mod nodes take a memory operand and produce mode_T tuples
   (memory result + exception + value). */
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
304 new_bd_Minus (dbg_info *db, ir_node *block,
305 ir_node *op, ir_mode *mode)
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
311 res = optimize_node(res);
312 IRN_VRFY_IRG(res, irg);
317 new_bd_Mul (dbg_info *db, ir_node *block,
318 ir_node *op1, ir_node *op2, ir_mode *mode)
322 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
327 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
333 new_bd_Quot (dbg_info *db, ir_node *block,
334 ir_node *memop, ir_node *op1, ir_node *op2)
338 ir_graph *irg = current_ir_graph;
343 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
344 res = optimize_node(res);
345 IRN_VRFY_IRG(res, irg);
350 new_bd_DivMod (dbg_info *db, ir_node *block,
351 ir_node *memop, ir_node *op1, ir_node *op2)
355 ir_graph *irg = current_ir_graph;
360 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Div (dbg_info *db, ir_node *block,
368 ir_node *memop, ir_node *op1, ir_node *op2)
372 ir_graph *irg = current_ir_graph;
377 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
378 res = optimize_node(res);
379 IRN_VRFY_IRG(res, irg);
384 new_bd_Mod (dbg_info *db, ir_node *block,
385 ir_node *memop, ir_node *op1, ir_node *op2)
389 ir_graph *irg = current_ir_graph;
394 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
395 res = optimize_node(res);
396 IRN_VRFY_IRG(res, irg);
401 new_bd_And (dbg_info *db, ir_node *block,
402 ir_node *op1, ir_node *op2, ir_mode *mode)
406 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_Or (dbg_info *db, ir_node *block,
418 ir_node *op1, ir_node *op2, ir_mode *mode)
422 ir_graph *irg = current_ir_graph;
426 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
427 res = optimize_node(res);
428 IRN_VRFY_IRG(res, irg);
433 new_bd_Eor (dbg_info *db, ir_node *block,
434 ir_node *op1, ir_node *op2, ir_mode *mode)
438 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
443 res = optimize_node (res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_Not (dbg_info *db, ir_node *block,
450 ir_node *op, ir_mode *mode)
453 ir_graph *irg = current_ir_graph;
455 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_bd_Shl (dbg_info *db, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
467 ir_graph *irg = current_ir_graph;
471 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Shr (dbg_info *db, ir_node *block,
479 ir_node *op, ir_node *k, ir_mode *mode)
483 ir_graph *irg = current_ir_graph;
487 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_bd_Shrs (dbg_info *db, ir_node *block,
495 ir_node *op, ir_node *k, ir_mode *mode)
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
504 res = optimize_node(res);
505 IRN_VRFY_IRG(res, irg);
510 new_bd_Rot (dbg_info *db, ir_node *block,
511 ir_node *op, ir_node *k, ir_mode *mode)
515 ir_graph *irg = current_ir_graph;
519 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
520 res = optimize_node(res);
521 IRN_VRFY_IRG(res, irg);
526 new_bd_Abs (dbg_info *db, ir_node *block,
527 ir_node *op, ir_mode *mode)
530 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
533 res = optimize_node (res);
534 IRN_VRFY_IRG(res, irg);
/* Cmp produces a mode_T node; relations are selected via Proj. */
539 new_bd_Cmp (dbg_info *db, ir_node *block,
540 ir_node *op1, ir_node *op2)
544 ir_graph *irg = current_ir_graph;
549 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
/* Control-flow and memory-operation constructors. Call/Return prepend the
   store (and callee) in front of the argument array via an alloca'd r_in. */
556 new_bd_Jmp (dbg_info *db, ir_node *block)
559 ir_graph *irg = current_ir_graph;
561 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
562 res = optimize_node (res);
563 IRN_VRFY_IRG (res, irg);
/* Indirect jump to a computed target. */
568 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
571 ir_graph *irg = current_ir_graph;
573 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
574 res = optimize_node (res);
575 IRN_VRFY_IRG (res, irg);
577 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
583 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
586 ir_graph *irg = current_ir_graph;
588 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
589 res->attr.c.kind = dense;
590 res->attr.c.default_proj = 0;
591 res->attr.c.pred = COND_JMP_PRED_NONE;
592 res = optimize_node (res);
593 IRN_VRFY_IRG(res, irg);
/* Call: predecessors are [store, callee, arg0..argN-1]; r_arity = arity+2. */
598 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
599 ir_node *callee, int arity, ir_node **in, ir_type *tp)
604 ir_graph *irg = current_ir_graph;
607 NEW_ARR_A(ir_node *, r_in, r_arity);
610 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
612 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
614 assert((get_unknown_type() == tp) || is_Method_type(tp));
615 set_Call_type(res, tp);
616 res->attr.call.exc.pin_state = op_pin_state_pinned;
617 res->attr.call.callee_arr = NULL;
618 res = optimize_node(res);
619 IRN_VRFY_IRG(res, irg);
/* Return: predecessors are [store, res0..resN-1]; r_arity = arity+1. */
624 new_bd_Return (dbg_info *db, ir_node *block,
625 ir_node *store, int arity, ir_node **in)
630 ir_graph *irg = current_ir_graph;
633 NEW_ARR_A (ir_node *, r_in, r_arity);
635 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
636 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
/* Load: [store, adr] -> mode_T; loaded mode recorded in the attribute. */
658 new_bd_Load (dbg_info *db, ir_node *block,
659 ir_node *store, ir_node *adr, ir_mode *mode)
663 ir_graph *irg = current_ir_graph;
667 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
668 res->attr.load.exc.pin_state = op_pin_state_pinned;
669 res->attr.load.load_mode = mode;
670 res->attr.load.volatility = volatility_non_volatile;
671 res = optimize_node(res);
672 IRN_VRFY_IRG(res, irg);
677 new_bd_Store (dbg_info *db, ir_node *block,
678 ir_node *store, ir_node *adr, ir_node *val)
682 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
688 res->attr.store.exc.pin_state = op_pin_state_pinned;
689 res->attr.store.volatility = volatility_non_volatile;
690 res = optimize_node(res);
691 IRN_VRFY_IRG(res, irg);
696 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
697 ir_node *size, ir_type *alloc_type, where_alloc where)
701 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
706 res->attr.a.exc.pin_state = op_pin_state_pinned;
707 res->attr.a.where = where;
708 res->attr.a.type = alloc_type;
709 res = optimize_node(res);
710 IRN_VRFY_IRG(res, irg);
/* Free yields the new memory state directly (mode_M, no exception tuple). */
715 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
716 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
720 ir_graph *irg = current_ir_graph;
725 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
726 res->attr.f.where = where;
727 res->attr.f.type = free_type;
728 res = optimize_node(res);
729 IRN_VRFY_IRG(res, irg);
/* Sel: address computation for an entity, predecessors [store, objptr,
   index0..indexN-1]. */
734 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
735 int arity, ir_node **in, entity *ent)
740 ir_graph *irg = current_ir_graph;
742 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
745 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
748 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
750 * FIXM: Sel's can select functions which should be of mode mode_P_code.
752 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
753 res->attr.s.ent = ent;
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
760 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
761 ir_node *objptr, ir_type *type)
766 ir_graph *irg = current_ir_graph;
769 NEW_ARR_A(ir_node *, r_in, r_arity);
/* NOTE(review): this builds the InstOf node with op_Sel, not op_InstOf, yet
   fills attr.io -- looks like a copy-paste from new_bd_Sel; verify against
   the full file / op definitions before relying on it. */
773 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
774 res->attr.io.type = type;
776 /* res = optimize(res); */
777 IRN_VRFY_IRG(res, irg);
/* SymConst: mode depends on kind -- address kinds get pointer mode, the
   size/type-tag kinds presumably get an integer mode (set on a line not
   visible in this excerpt). */
782 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
783 symconst_kind symkind, ir_type *tp) {
786 ir_graph *irg = current_ir_graph;
788 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
789 mode = mode_P_data; /* FIXME: can be mode_P_code */
793 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
795 res->attr.i.num = symkind;
796 res->attr.i.sym = value;
799 res = optimize_node(res);
800 IRN_VRFY_IRG(res, irg);
805 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
806 symconst_kind symkind)
808 ir_graph *irg = current_ir_graph;
810 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Sync: joins several memory states into one (mode_M). */
815 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
818 ir_graph *irg = current_ir_graph;
820 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts the relation `val cmp bound` holds; mode follows val. */
827 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
829 ir_node *in[2], *res;
830 ir_graph *irg = current_ir_graph;
834 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
835 res->attr.confirm_cmp = cmp;
836 res = optimize_node (res);
837 IRN_VRFY_IRG(res, irg);
841 /* this function is often called with current_ir_graph unset */
843 new_bd_Unknown (ir_mode *m)
846 ir_graph *irg = current_ir_graph;
/* Unknown lives in the start block; no verification here (see comment above). */
848 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
849 res = optimize_node(res);
/* Interprocedural-view and miscellaneous constructors. */
854 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
858 ir_graph *irg = current_ir_graph;
860 in[0] = get_Call_ptr(call);
861 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
862 /* res->attr.callbegin.irg = irg; */
863 res->attr.callbegin.call = call;
864 res = optimize_node(res);
865 IRN_VRFY_IRG(res, irg);
870 new_bd_EndReg (dbg_info *db, ir_node *block)
873 ir_graph *irg = current_ir_graph;
875 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
877 IRN_VRFY_IRG(res, irg);
882 new_bd_EndExcept (dbg_info *db, ir_node *block)
885 ir_graph *irg = current_ir_graph;
887 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
888 irg->end_except = res;
889 IRN_VRFY_IRG (res, irg);
894 new_bd_Break (dbg_info *db, ir_node *block)
897 ir_graph *irg = current_ir_graph;
899 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
900 res = optimize_node(res);
901 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj, with call-graph in/backedges. */
906 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
910 ir_graph *irg = current_ir_graph;
912 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
913 res->attr.filter.proj = proj;
914 res->attr.filter.in_cg = NULL;
915 res->attr.filter.backedge = NULL;
918 assert(get_Proj_pred(res));
919 assert(get_nodes_block(get_Proj_pred(res)));
921 res = optimize_node(res);
922 IRN_VRFY_IRG(res, irg);
/* Mux: conditional value select, predecessors [sel, false, true]. */
927 new_bd_Mux (dbg_info *db, ir_node *block,
928 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
932 ir_graph *irg = current_ir_graph;
938 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
941 res = optimize_node(res);
942 IRN_VRFY_IRG(res, irg);
947 new_bd_CopyB (dbg_info *db, ir_node *block,
948 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
952 ir_graph *irg = current_ir_graph;
958 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
960 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
961 res->attr.copyb.data_type = data_type;
962 res = optimize_node(res);
963 IRN_VRFY_IRG(res, irg);
968 new_bd_Bound (dbg_info *db, ir_node *block,
969 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
973 ir_graph *irg = current_ir_graph;
980 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
/* NOTE(review): a Bound node writing attr.copyb looks like a copy-paste from
   new_bd_CopyB above -- confirm whether attr.bound exists and should be used. */
982 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
983 res = optimize_node(res);
984 IRN_VRFY_IRG(res, irg);
988 /* --------------------------------------------- */
989 /* private interfaces, for professional use only */
990 /* --------------------------------------------- */
/* The new_rd_* wrappers take the target graph explicitly: save the global
   current_ir_graph, switch to `irg`, delegate to the bd constructor, restore. */
992 /* Constructs a Block with a fixed number of predecessors.
993 Does not set current_block. Can not be used with automatic
994 Phi node construction. */
996 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
998 ir_graph *rem = current_ir_graph;
1001 current_ir_graph = irg;
1002 res = new_bd_Block (db, arity, in);
1003 current_ir_graph = rem;
1009 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
1011 ir_graph *rem = current_ir_graph;
1014 current_ir_graph = irg;
1015 res = new_bd_Start (db, block);
1016 current_ir_graph = rem;
/* Build an End node in graph `irg`, independent of the current graph.
   Same save/switch/delegate/restore pattern as every other new_rd_* wrapper. */
1022 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
1025 ir_graph *rem = current_ir_graph;
/* BUG FIX: this line previously read `current_ir_graph = rem;` -- a no-op that
   left whatever graph happened to be current in place, so new_bd_End built the
   End node in the wrong graph. All sibling wrappers assign `irg` here. */
1027 current_ir_graph = irg;
1028 res = new_bd_End (db, block);
1029 current_ir_graph = rem;
1034 /* Creates a Phi node with all predecessors. Calling this constructor
1035 is only allowed if the corresponding block is mature. */
1037 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
1040 ir_graph *rem = current_ir_graph;
1042 current_ir_graph = irg;
1043 res = new_bd_Phi (db, block,arity, in, mode);
1044 current_ir_graph = rem;
1050 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
1053 ir_graph *rem = current_ir_graph;
1055 current_ir_graph = irg;
1056 res = new_bd_Const_type (db, block, mode, con, tp);
1057 current_ir_graph = rem;
1063 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1066 ir_graph *rem = current_ir_graph;
1068 current_ir_graph = irg;
1069 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1070 current_ir_graph = rem;
/* Const_long needs no graph switch: it only wraps the tarval construction. */
1076 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1078 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1082 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_Id(db, block, val, mode);
1089 current_ir_graph = rem;
1095 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1099 ir_graph *rem = current_ir_graph;
1101 current_ir_graph = irg;
1102 res = new_bd_Proj(db, block, arg, mode, proj);
1103 current_ir_graph = rem;
1109 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1113 ir_graph *rem = current_ir_graph;
1115 current_ir_graph = irg;
1116 res = new_bd_defaultProj(db, block, arg, max_proj);
1117 current_ir_graph = rem;
1123 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1126 ir_graph *rem = current_ir_graph;
1128 current_ir_graph = irg;
1129 res = new_bd_Conv(db, block, op, mode);
1130 current_ir_graph = rem;
1136 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1139 ir_graph *rem = current_ir_graph;
1141 current_ir_graph = irg;
1142 res = new_bd_Cast(db, block, op, to_tp);
1143 current_ir_graph = rem;
1149 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1152 ir_graph *rem = current_ir_graph;
1154 current_ir_graph = irg;
1155 res = new_bd_Tuple(db, block, arity, in);
1156 current_ir_graph = rem;
/* Graph-explicit wrappers for the arithmetic/logic/shift/compare constructors:
   each saves current_ir_graph, switches to irg, delegates to its new_bd_*
   counterpart, and restores the saved graph. */
1162 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1163 ir_node *op1, ir_node *op2, ir_mode *mode)
1166 ir_graph *rem = current_ir_graph;
1168 current_ir_graph = irg;
1169 res = new_bd_Add(db, block, op1, op2, mode);
1170 current_ir_graph = rem;
1176 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1177 ir_node *op1, ir_node *op2, ir_mode *mode)
1180 ir_graph *rem = current_ir_graph;
1182 current_ir_graph = irg;
1183 res = new_bd_Sub(db, block, op1, op2, mode);
1184 current_ir_graph = rem;
1190 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1191 ir_node *op, ir_mode *mode)
1194 ir_graph *rem = current_ir_graph;
1196 current_ir_graph = irg;
1197 res = new_bd_Minus(db, block, op, mode);
1198 current_ir_graph = rem;
1204 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1205 ir_node *op1, ir_node *op2, ir_mode *mode)
1208 ir_graph *rem = current_ir_graph;
1210 current_ir_graph = irg;
1211 res = new_bd_Mul(db, block, op1, op2, mode);
1212 current_ir_graph = rem;
1218 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1219 ir_node *memop, ir_node *op1, ir_node *op2)
1222 ir_graph *rem = current_ir_graph;
1224 current_ir_graph = irg;
1225 res = new_bd_Quot(db, block, memop, op1, op2);
1226 current_ir_graph = rem;
1232 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1233 ir_node *memop, ir_node *op1, ir_node *op2)
1236 ir_graph *rem = current_ir_graph;
1238 current_ir_graph = irg;
1239 res = new_bd_DivMod(db, block, memop, op1, op2);
1240 current_ir_graph = rem;
1246 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1247 ir_node *memop, ir_node *op1, ir_node *op2)
1250 ir_graph *rem = current_ir_graph;
1252 current_ir_graph = irg;
1253 res = new_bd_Div (db, block, memop, op1, op2);
1254 current_ir_graph =rem;
1260 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1261 ir_node *memop, ir_node *op1, ir_node *op2)
1264 ir_graph *rem = current_ir_graph;
1266 current_ir_graph = irg;
1267 res = new_bd_Mod(db, block, memop, op1, op2);
1268 current_ir_graph = rem;
1274 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1275 ir_node *op1, ir_node *op2, ir_mode *mode)
1278 ir_graph *rem = current_ir_graph;
1280 current_ir_graph = irg;
1281 res = new_bd_And(db, block, op1, op2, mode);
1282 current_ir_graph = rem;
1288 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1289 ir_node *op1, ir_node *op2, ir_mode *mode)
1292 ir_graph *rem = current_ir_graph;
1294 current_ir_graph = irg;
1295 res = new_bd_Or(db, block, op1, op2, mode);
1296 current_ir_graph = rem;
1302 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1303 ir_node *op1, ir_node *op2, ir_mode *mode)
1306 ir_graph *rem = current_ir_graph;
1308 current_ir_graph = irg;
1309 res = new_bd_Eor(db, block, op1, op2, mode);
1310 current_ir_graph = rem;
1316 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1317 ir_node *op, ir_mode *mode)
1320 ir_graph *rem = current_ir_graph;
1322 current_ir_graph = irg;
1323 res = new_bd_Not(db, block, op, mode);
1324 current_ir_graph = rem;
1330 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1331 ir_node *op, ir_node *k, ir_mode *mode)
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Shl (db, block, op, k, mode);
1338 current_ir_graph = rem;
1344 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1345 ir_node *op, ir_node *k, ir_mode *mode)
1348 ir_graph *rem = current_ir_graph;
1350 current_ir_graph = irg;
1351 res = new_bd_Shr(db, block, op, k, mode);
1352 current_ir_graph = rem;
1358 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1359 ir_node *op, ir_node *k, ir_mode *mode)
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Shrs(db, block, op, k, mode);
1366 current_ir_graph = rem;
1372 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1373 ir_node *op, ir_node *k, ir_mode *mode)
1376 ir_graph *rem = current_ir_graph;
1378 current_ir_graph = irg;
1379 res = new_bd_Rot(db, block, op, k, mode);
1380 current_ir_graph = rem;
1386 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1387 ir_node *op, ir_mode *mode)
1390 ir_graph *rem = current_ir_graph;
1392 current_ir_graph = irg;
1393 res = new_bd_Abs(db, block, op, mode);
1394 current_ir_graph = rem;
1400 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1401 ir_node *op1, ir_node *op2)
1404 ir_graph *rem = current_ir_graph;
1406 current_ir_graph = irg;
1407 res = new_bd_Cmp(db, block, op1, op2);
1408 current_ir_graph = rem;
/* Graph-explicit wrappers for control-flow and memory-operation constructors;
   same save/switch/delegate/restore pattern throughout. */
1414 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1417 ir_graph *rem = current_ir_graph;
1419 current_ir_graph = irg;
1420 res = new_bd_Jmp(db, block);
1421 current_ir_graph = rem;
1427 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1430 ir_graph *rem = current_ir_graph;
1432 current_ir_graph = irg;
1433 res = new_bd_IJmp(db, block, tgt);
1434 current_ir_graph = rem;
1440 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1443 ir_graph *rem = current_ir_graph;
1445 current_ir_graph = irg;
1446 res = new_bd_Cond(db, block, c);
1447 current_ir_graph = rem;
1453 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1454 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1457 ir_graph *rem = current_ir_graph;
1459 current_ir_graph = irg;
1460 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1461 current_ir_graph = rem;
1467 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1468 ir_node *store, int arity, ir_node **in)
1471 ir_graph *rem = current_ir_graph;
1473 current_ir_graph = irg;
1474 res = new_bd_Return(db, block, store, arity, in);
1475 current_ir_graph = rem;
1481 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1484 ir_graph *rem = current_ir_graph;
1486 current_ir_graph = irg;
1487 res = new_bd_Raise(db, block, store, obj);
1488 current_ir_graph = rem;
1494 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1495 ir_node *store, ir_node *adr, ir_mode *mode)
1498 ir_graph *rem = current_ir_graph;
1500 current_ir_graph = irg;
1501 res = new_bd_Load(db, block, store, adr, mode);
1502 current_ir_graph = rem;
1508 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1509 ir_node *store, ir_node *adr, ir_node *val)
1512 ir_graph *rem = current_ir_graph;
1514 current_ir_graph = irg;
1515 res = new_bd_Store(db, block, store, adr, val);
1516 current_ir_graph = rem;
1522 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1523 ir_node *size, ir_type *alloc_type, where_alloc where)
1526 ir_graph *rem = current_ir_graph;
1528 current_ir_graph = irg;
1529 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1530 current_ir_graph = rem;
1536 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1537 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1540 ir_graph *rem = current_ir_graph;
1542 current_ir_graph = irg;
1543 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1544 current_ir_graph = rem;
/* simpleSel: Sel without index operands (arity 0, in = NULL). */
1550 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1551 ir_node *store, ir_node *objptr, entity *ent)
1554 ir_graph *rem = current_ir_graph;
1556 current_ir_graph = irg;
1557 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1558 current_ir_graph = rem;
1564 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1565 int arity, ir_node **in, entity *ent)
1568 ir_graph *rem = current_ir_graph;
1570 current_ir_graph = irg;
1571 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1572 current_ir_graph = rem;
1578 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1579 ir_node *objptr, ir_type *type)
1582 ir_graph *rem = current_ir_graph;
1584 current_ir_graph = irg;
1585 res = new_bd_InstOf(db, block, store, objptr, type);
1586 current_ir_graph = rem;
1592 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1593 symconst_kind symkind, ir_type *tp)
1596 ir_graph *rem = current_ir_graph;
1598 current_ir_graph = irg;
1599 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1600 current_ir_graph = rem;
1606 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1607 symconst_kind symkind)
1609 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* SymConst convenience constructors; all place the node in the start block.
   The (ir_type *) casts below only serve to initialize the symconst_symbol
   union through its first member -- the kind tag selects the real member. */
1613 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1615 symconst_symbol sym = {(ir_type *)symbol};
1616 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1619 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1620 symconst_symbol sym = {(ir_type *)symbol};
1621 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1624 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1625 symconst_symbol sym = {symbol};
1626 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1629 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1630 symconst_symbol sym = {symbol};
1631 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1635 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1638 ir_graph *rem = current_ir_graph;
1640 current_ir_graph = irg;
1641 res = new_bd_Sync(db, block, arity, in);
1642 current_ir_graph = rem;
/* Bad is unique per graph; body (returning irg's singleton) not visible here. */
1648 new_rd_Bad (ir_graph *irg)
1654 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1657 ir_graph *rem = current_ir_graph;
1659 current_ir_graph = irg;
1660 res = new_bd_Confirm(db, block, val, bound, cmp);
1661 current_ir_graph = rem;
1666 /* this function is often called with current_ir_graph unset */
1668 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1671 ir_graph *rem = current_ir_graph;
1673 current_ir_graph = irg;
1674 res = new_bd_Unknown(m);
1675 current_ir_graph = rem;
1681 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1684 ir_graph *rem = current_ir_graph;
1686 current_ir_graph = irg;
1687 res = new_bd_CallBegin(db, block, call);
1688 current_ir_graph = rem;
/* EndReg/EndExcept build directly (no graph switch needed: irg is passed
   straight to new_ir_node). */
1694 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1698 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1700 IRN_VRFY_IRG(res, irg);
1705 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1709 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1710 irg->end_except = res;
1711 IRN_VRFY_IRG (res, irg);
1716 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1719 ir_graph *rem = current_ir_graph;
1721 current_ir_graph = irg;
1722 res = new_bd_Break(db, block);
1723 current_ir_graph = rem;
1729 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1733 ir_graph *rem = current_ir_graph;
1735 current_ir_graph = irg;
1736 res = new_bd_Filter(db, block, arg, mode, proj);
1737 current_ir_graph = rem;
1743 new_rd_NoMem (ir_graph *irg) {
1748 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1749 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1752 ir_graph *rem = current_ir_graph;
1754 current_ir_graph = irg;
1755 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1756 current_ir_graph = rem;
1761 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1762 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1765 ir_graph *rem = current_ir_graph;
1767 current_ir_graph = irg;
1768 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1769 current_ir_graph = rem;
1774 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1775 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1778 ir_graph *rem = current_ir_graph;
1780 current_ir_graph = irg;
1781 res = new_bd_Bound(db, block, store, idx, lower, upper);
1782 current_ir_graph = rem;
1787 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1788 return new_rd_Block(NULL, irg, arity, in);
1790 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1791 return new_rd_Start(NULL, irg, block);
1793 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1794 return new_rd_End(NULL, irg, block);
1796 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1797 return new_rd_Jmp(NULL, irg, block);
1799 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1800 return new_rd_IJmp(NULL, irg, block, tgt);
1802 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1803 return new_rd_Cond(NULL, irg, block, c);
1805 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1806 ir_node *store, int arity, ir_node **in) {
1807 return new_rd_Return(NULL, irg, block, store, arity, in);
1809 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1810 ir_node *store, ir_node *obj) {
1811 return new_rd_Raise(NULL, irg, block, store, obj);
1813 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1814 ir_mode *mode, tarval *con) {
1815 return new_rd_Const(NULL, irg, block, mode, con);
1818 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1819 ir_mode *mode, long value) {
1820 return new_rd_Const_long(NULL, irg, block, mode, value);
1823 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1824 ir_mode *mode, tarval *con, ir_type *tp) {
1825 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1828 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1829 symconst_symbol value, symconst_kind symkind) {
1830 return new_rd_SymConst(NULL, irg, block, value, symkind);
1832 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1833 ir_node *objptr, int n_index, ir_node **index,
1835 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1837 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1839 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1841 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1842 ir_node *callee, int arity, ir_node **in,
1844 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1846 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1847 ir_node *op1, ir_node *op2, ir_mode *mode) {
1848 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1850 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1851 ir_node *op1, ir_node *op2, ir_mode *mode) {
1852 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1854 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1855 ir_node *op, ir_mode *mode) {
1856 return new_rd_Minus(NULL, irg, block, op, mode);
1858 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1859 ir_node *op1, ir_node *op2, ir_mode *mode) {
1860 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1862 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1863 ir_node *memop, ir_node *op1, ir_node *op2) {
1864 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1866 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1867 ir_node *memop, ir_node *op1, ir_node *op2) {
1868 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1870 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1871 ir_node *memop, ir_node *op1, ir_node *op2) {
1872 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1874 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1875 ir_node *memop, ir_node *op1, ir_node *op2) {
1876 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1878 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1879 ir_node *op, ir_mode *mode) {
1880 return new_rd_Abs(NULL, irg, block, op, mode);
1882 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1883 ir_node *op1, ir_node *op2, ir_mode *mode) {
1884 return new_rd_And(NULL, irg, block, op1, op2, mode);
1886 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1887 ir_node *op1, ir_node *op2, ir_mode *mode) {
1888 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1890 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1891 ir_node *op1, ir_node *op2, ir_mode *mode) {
1892 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1894 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1895 ir_node *op, ir_mode *mode) {
1896 return new_rd_Not(NULL, irg, block, op, mode);
1898 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1899 ir_node *op1, ir_node *op2) {
1900 return new_rd_Cmp(NULL, irg, block, op1, op2);
1902 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1903 ir_node *op, ir_node *k, ir_mode *mode) {
1904 return new_rd_Shl(NULL, irg, block, op, k, mode);
1906 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1907 ir_node *op, ir_node *k, ir_mode *mode) {
1908 return new_rd_Shr(NULL, irg, block, op, k, mode);
1910 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1911 ir_node *op, ir_node *k, ir_mode *mode) {
1912 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1914 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1915 ir_node *op, ir_node *k, ir_mode *mode) {
1916 return new_rd_Rot(NULL, irg, block, op, k, mode);
1918 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1919 ir_node *op, ir_mode *mode) {
1920 return new_rd_Conv(NULL, irg, block, op, mode);
1922 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1923 return new_rd_Cast(NULL, irg, block, op, to_tp);
1925 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1926 ir_node **in, ir_mode *mode) {
1927 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1929 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1930 ir_node *store, ir_node *adr, ir_mode *mode) {
1931 return new_rd_Load(NULL, irg, block, store, adr, mode);
1933 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1934 ir_node *store, ir_node *adr, ir_node *val) {
1935 return new_rd_Store(NULL, irg, block, store, adr, val);
1937 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1938 ir_node *size, ir_type *alloc_type, where_alloc where) {
1939 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1941 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1942 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1943 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1945 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1946 return new_rd_Sync(NULL, irg, block, arity, in);
1948 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1949 ir_mode *mode, long proj) {
1950 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1952 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1954 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1956 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1957 int arity, ir_node **in) {
1958 return new_rd_Tuple(NULL, irg, block, arity, in );
1960 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1961 ir_node *val, ir_mode *mode) {
1962 return new_rd_Id(NULL, irg, block, val, mode);
1964 ir_node *new_r_Bad (ir_graph *irg) {
1965 return new_rd_Bad(irg);
1967 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1968 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1970 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1971 return new_rd_Unknown(irg, m);
1973 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1974 return new_rd_CallBegin(NULL, irg, block, callee);
1976 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1977 return new_rd_EndReg(NULL, irg, block);
1979 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1980 return new_rd_EndExcept(NULL, irg, block);
1982 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1983 return new_rd_Break(NULL, irg, block);
1985 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1986 ir_mode *mode, long proj) {
1987 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1989 ir_node *new_r_NoMem (ir_graph *irg) {
1990 return new_rd_NoMem(irg);
1992 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1993 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1994 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1996 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1997 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1998 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
2000 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
2001 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
2002 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
/* ************************************************************************ */
/* Public interfaces: graph construction tools operating on the            */
/* current graph (current_ir_graph) and current block.                     */
2011 * - create a new Start node in the current block
2013 * @return s - pointer to the created Start node
2018 new_d_Start (dbg_info *db)
2022 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
2023 op_Start, mode_T, 0, NULL);
2024 /* res->attr.start.irg = current_ir_graph; */
2026 res = optimize_node(res);
2027 IRN_VRFY_IRG(res, current_ir_graph);
2032 new_d_End (dbg_info *db)
2035 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
2036 op_End, mode_X, -1, NULL);
2037 res = optimize_node(res);
2038 IRN_VRFY_IRG(res, current_ir_graph);
2043 /* Constructs a Block with a fixed number of predecessors.
2044 Does set current_block. Can be used with automatic Phi
2045 node construction. */
2047 new_d_Block (dbg_info *db, int arity, ir_node **in)
2051 int has_unknown = 0;
2053 res = new_bd_Block(db, arity, in);
2055 /* Create and initialize array for Phi-node construction. */
2056 if (get_irg_phase_state(current_ir_graph) == phase_building) {
2057 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2058 current_ir_graph->n_loc);
2059 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2062 for (i = arity-1; i >= 0; i--)
2063 if (get_irn_op(in[i]) == op_Unknown) {
2068 if (!has_unknown) res = optimize_node(res);
2069 current_ir_graph->current_block = res;
2071 IRN_VRFY_IRG(res, current_ir_graph);
2076 /* ***********************************************************************/
2077 /* Methods necessary for automatic Phi node creation */
2079 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2080 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2081 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2082 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2084 Call Graph: ( A ---> B == A "calls" B)
2086 get_value mature_immBlock
2094 get_r_value_internal |
2098 new_rd_Phi0 new_rd_Phi_in
2100 * *************************************************************************** */
2102 /** Creates a Phi node with 0 predecessors */
2103 static INLINE ir_node *
2104 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2108 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2109 IRN_VRFY_IRG(res, irg);
2113 /* There are two implementations of the Phi node construction. The first
2114 is faster, but does not work for blocks with more than 2 predecessors.
2115 The second works always but is slower and causes more unnecessary Phi
2117 Select the implementations by the following preprocessor flag set in
2119 #if USE_FAST_PHI_CONSTRUCTION
2121 /* This is a stack used for allocating and deallocating nodes in
2122 new_rd_Phi_in. The original implementation used the obstack
2123 to model this stack, now it is explicit. This reduces side effects.
2125 #if USE_EXPLICIT_PHI_IN_STACK
2127 new_Phi_in_stack(void) {
2130 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2132 res->stack = NEW_ARR_F (ir_node *, 0);
2139 free_Phi_in_stack(Phi_in_stack *s) {
2140 DEL_ARR_F(s->stack);
2144 free_to_Phi_in_stack(ir_node *phi) {
2145 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2146 current_ir_graph->Phi_in_stack->pos)
2147 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2149 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2151 (current_ir_graph->Phi_in_stack->pos)++;
2154 static INLINE ir_node *
2155 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2156 int arity, ir_node **in) {
2158 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2159 int pos = current_ir_graph->Phi_in_stack->pos;
2163 /* We need to allocate a new node */
2164 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2165 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2167 /* reuse the old node and initialize it again. */
2170 assert (res->kind == k_ir_node);
2171 assert (res->op == op_Phi);
2175 assert (arity >= 0);
2176 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2177 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2179 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2181 (current_ir_graph->Phi_in_stack->pos)--;
2185 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2187 /* Creates a Phi node with a given, fixed array **in of predecessors.
2188 If the Phi node is unnecessary, as the same value reaches the block
2189 through all control flow paths, it is eliminated and the value
2190 returned directly. This constructor is only intended for use in
2191 the automatic Phi node generation triggered by get_value or mature.
2192 The implementation is quite tricky and depends on the fact, that
2193 the nodes are allocated on a stack:
2194 The in array contains predecessors and NULLs. The NULLs appear,
2195 if get_r_value_internal, that computed the predecessors, reached
2196 the same block on two paths. In this case the same value reaches
2197 this block on both paths, there is no definition in between. We need
2198 not allocate a Phi where these path's merge, but we have to communicate
2199 this fact to the caller. This happens by returning a pointer to the
2200 node the caller _will_ allocate. (Yes, we predict the address. We can
2201 do so because the nodes are allocated on the obstack.) The caller then
2202 finds a pointer to itself and, when this routine is called again,
2205 static INLINE ir_node *
2206 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2209 ir_node *res, *known;
2211 /* Allocate a new node on the obstack. This can return a node to
2212 which some of the pointers in the in-array already point.
2213 Attention: the constructor copies the in array, i.e., the later
2214 changes to the array in this routine do not affect the
2215 constructed node! If the in array contains NULLs, there will be
2216 missing predecessors in the returned node. Is this a possible
2217 internal state of the Phi node generation? */
2218 #if USE_EXPLICIT_PHI_IN_STACK
2219 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2221 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2222 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2225 /* The in-array can contain NULLs. These were returned by
2226 get_r_value_internal if it reached the same block/definition on a
2227 second path. The NULLs are replaced by the node itself to
2228 simplify the test in the next loop. */
2229 for (i = 0; i < ins; ++i) {
2234 /* This loop checks whether the Phi has more than one predecessor.
2235 If so, it is a real Phi node and we break the loop. Else the Phi
2236 node merges the same definition on several paths and therefore is
2238 for (i = 0; i < ins; ++i) {
2239 if (in[i] == res || in[i] == known)
2248 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2250 #if USE_EXPLICIT_PHI_IN_STACK
2251 free_to_Phi_in_stack(res);
2253 edges_node_deleted(res, current_ir_graph);
2254 obstack_free(current_ir_graph->obst, res);
2258 res = optimize_node (res);
2259 IRN_VRFY_IRG(res, irg);
2262 /* return the pointer to the Phi node. This node might be deallocated! */
2267 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2270 allocates and returns this node. The routine called to allocate the
2271 node might optimize it away and return a real value, or even a pointer
2272 to a deallocated Phi node on top of the obstack!
2273 This function is called with an in-array of proper size. **/
2275 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2277 ir_node *prevBlock, *res;
2280 /* This loop goes to all predecessor blocks of the block the Phi node is in
2281 and there finds the operands of the Phi node by calling
2282 get_r_value_internal. */
2283 for (i = 1; i <= ins; ++i) {
2284 assert (block->in[i]);
2285 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2287 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2290 /* After collecting all predecessors into the array nin a new Phi node
2291 with these predecessors is created. This constructor contains an
2292 optimization: If all predecessors of the Phi node are identical it
2293 returns the only operand instead of a new Phi node. If the value
2294 passes two different control flow edges without being defined, and
2295 this is the second path treated, a pointer to the node that will be
2296 allocated for the first path (recursion) is returned. We already
2297 know the address of this node, as it is the next node to be allocated
2298 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2299 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2301 /* Now we now the value for "pos" and can enter it in the array with
2302 all known local variables. Attention: this might be a pointer to
2303 a node, that later will be allocated!!! See new_rd_Phi_in.
2304 If this is called in mature, after some set_value in the same block,
2305 the proper value must not be overwritten:
2307 get_value (makes Phi0, put's it into graph_arr)
2308 set_value (overwrites Phi0 in graph_arr)
2309 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2312 if (!block->attr.block.graph_arr[pos]) {
2313 block->attr.block.graph_arr[pos] = res;
2315 /* printf(" value already computed by %s\n",
2316 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2322 /* This function returns the last definition of a variable. In case
2323 this variable was last defined in a previous block, Phi nodes are
2324 inserted. If the part of the firm graph containing the definition
2325 is not yet constructed, a dummy Phi node is returned. */
2327 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2330 /* There are 4 cases to treat.
2332 1. The block is not mature and we visit it the first time. We can not
2333 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2334 predecessors is returned. This node is added to the linked list (field
2335 "link") of the containing block to be completed when this block is
2336 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2339 2. The value is already known in this block, graph_arr[pos] is set and we
2340 visit the block the first time. We can return the value without
2341 creating any new nodes.
2343 3. The block is mature and we visit it the first time. A Phi node needs
2344 to be created (phi_merge). If the Phi is not needed, as all it's
2345 operands are the same value reaching the block through different
2346 paths, it's optimized away and the value itself is returned.
2348 4. The block is mature, and we visit it the second time. Now two
2349 subcases are possible:
2350 * The value was computed completely the last time we were here. This
2351 is the case if there is no loop. We can return the proper value.
2352 * The recursion that visited this node and set the flag did not
2353 return yet. We are computing a value in a loop and need to
2354 break the recursion without knowing the result yet.
2355 @@@ strange case. Straight forward we would create a Phi before
2356 starting the computation of it's predecessors. In this case we will
2357 find a Phi here in any case. The problem is that this implementation
2358 only creates a Phi after computing the predecessors, so that it is
2359 hard to compute self references of this Phi. @@@
2360 There is no simple check for the second subcase. Therefore we check
2361 for a second visit and treat all such cases as the second subcase.
2362 Anyways, the basic situation is the same: we reached a block
2363 on two paths without finding a definition of the value: No Phi
2364 nodes are needed on both paths.
2365 We return this information "Two paths, no Phi needed" by a very tricky
2366 implementation that relies on the fact that an obstack is a stack and
2367 will return a node with the same address on different allocations.
2368 Look also at phi_merge and new_rd_phi_in to understand this.
2369 @@@ Unfortunately this does not work, see testprogram
2370 three_cfpred_example.
2374 /* case 4 -- already visited. */
2375 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2377 /* visited the first time */
2378 set_irn_visited(block, get_irg_visited(current_ir_graph));
2380 /* Get the local valid value */
2381 res = block->attr.block.graph_arr[pos];
2383 /* case 2 -- If the value is actually computed, return it. */
2384 if (res) return res;
2386 if (block->attr.block.matured) { /* case 3 */
2388 /* The Phi has the same amount of ins as the corresponding block. */
2389 int ins = get_irn_arity(block);
2391 NEW_ARR_A (ir_node *, nin, ins);
2393 /* Phi merge collects the predecessors and then creates a node. */
2394 res = phi_merge (block, pos, mode, nin, ins);
2396 } else { /* case 1 */
2397 /* The block is not mature, we don't know how many in's are needed. A Phi
2398 with zero predecessors is created. Such a Phi node is called Phi0
2399 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2400 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2402 The Phi0 has to remember the pos of it's internal value. If the real
2403 Phi is computed, pos is used to update the array with the local
2406 res = new_rd_Phi0 (current_ir_graph, block, mode);
2407 res->attr.phi0_pos = pos;
2408 res->link = block->link;
2412 /* If we get here, the frontend missed a use-before-definition error */
2415 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2416 assert (mode->code >= irm_F && mode->code <= irm_P);
2417 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2418 tarval_mode_null[mode->code]);
2421 /* The local valid value is available now. */
2422 block->attr.block.graph_arr[pos] = res;
2430 it starts the recursion. This causes an Id at the entry of
2431 every block that has no definition of the value! **/
2433 #if USE_EXPLICIT_PHI_IN_STACK
2435 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2436 void free_Phi_in_stack(Phi_in_stack *s) { }
2439 static INLINE ir_node *
2440 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2441 ir_node **in, int ins, ir_node *phi0)
2444 ir_node *res, *known;
2446 /* Allocate a new node on the obstack. The allocation copies the in
2448 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2449 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2451 /* This loop checks whether the Phi has more than one predecessor.
2452 If so, it is a real Phi node and we break the loop. Else the
2453 Phi node merges the same definition on several paths and therefore
2454 is not needed. Don't consider Bad nodes! */
2456 for (i=0; i < ins; ++i)
2460 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2462 /* Optimize self referencing Phis: We can't detect them yet properly, as
2463 they still refer to the Phi0 they will replace. So replace right now. */
2464 if (phi0 && in[i] == phi0) in[i] = res;
2466 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2474 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2477 edges_node_deleted(res, current_ir_graph);
2478 obstack_free (current_ir_graph->obst, res);
2479 if (is_Phi(known)) {
2480 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2481 order, an enclosing Phi know may get superfluous. */
2482 res = optimize_in_place_2(known);
2484 exchange(known, res);
2490 /* A undefined value, e.g., in unreachable code. */
2494 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2495 IRN_VRFY_IRG(res, irg);
2496 /* Memory Phis in endless loops must be kept alive.
2497 As we can't distinguish these easily we keep all of them alive. */
2498 if ((res->op == op_Phi) && (mode == mode_M))
2499 add_End_keepalive(irg->end, res);
2506 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2508 #if PRECISE_EXC_CONTEXT
2510 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2512 /* Construct a new frag_array for node n.
2513 Copy the content from the current graph_arr of the corresponding block:
2514 this is the current state.
2515 Set ProjM(n) as current memory state.
2516 Further the last entry in frag_arr of current block points to n. This
2517 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2519 static INLINE ir_node ** new_frag_arr (ir_node *n)
2524 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2525 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2526 sizeof(ir_node *)*current_ir_graph->n_loc);
2528 /* turn off optimization before allocating Proj nodes, as res isn't
2530 opt = get_opt_optimize(); set_optimize(0);
2531 /* Here we rely on the fact that all frag ops have Memory as first result! */
2532 if (get_irn_op(n) == op_Call)
2533 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2535 assert((pn_Quot_M == pn_DivMod_M) &&
2536 (pn_Quot_M == pn_Div_M) &&
2537 (pn_Quot_M == pn_Mod_M) &&
2538 (pn_Quot_M == pn_Load_M) &&
2539 (pn_Quot_M == pn_Store_M) &&
2540 (pn_Quot_M == pn_Alloc_M) );
2541 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2545 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2550 * returns the frag_arr from a node
2552 static INLINE ir_node **
2553 get_frag_arr (ir_node *n) {
2554 switch (get_irn_opcode(n)) {
2556 return n->attr.call.exc.frag_arr;
2558 return n->attr.a.exc.frag_arr;
2560 return n->attr.load.exc.frag_arr;
2562 return n->attr.store.exc.frag_arr;
2564 return n->attr.except.frag_arr;
2569 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2571 if (!frag_arr[pos]) frag_arr[pos] = val;
2572 if (frag_arr[current_ir_graph->n_loc - 1]) {
2573 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2574 assert(arr != frag_arr && "Endless recursion detected");
2575 set_frag_value(arr, pos, val);
2580 for (i = 0; i < 1000; ++i) {
2581 if (!frag_arr[pos]) {
2582 frag_arr[pos] = val;
2584 if (frag_arr[current_ir_graph->n_loc - 1]) {
2585 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2591 assert(0 && "potential endless recursion");
2596 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2600 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2602 frag_arr = get_frag_arr(cfOp);
2603 res = frag_arr[pos];
2605 if (block->attr.block.graph_arr[pos]) {
2606 /* There was a set_value after the cfOp and no get_value before that
2607 set_value. We must build a Phi node now. */
2608 if (block->attr.block.matured) {
2609 int ins = get_irn_arity(block);
2611 NEW_ARR_A (ir_node *, nin, ins);
2612 res = phi_merge(block, pos, mode, nin, ins);
2614 res = new_rd_Phi0 (current_ir_graph, block, mode);
2615 res->attr.phi0_pos = pos;
2616 res->link = block->link;
2620 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2621 but this should be better: (remove comment if this works) */
2622 /* It's a Phi, we can write this into all graph_arrs with NULL */
2623 set_frag_value(block->attr.block.graph_arr, pos, res);
2625 res = get_r_value_internal(block, pos, mode);
2626 set_frag_value(block->attr.block.graph_arr, pos, res);
2631 #endif /* PRECISE_EXC_CONTEXT */
2634 computes the predecessors for the real phi node, and then
2635 allocates and returns this node. The routine called to allocate the
2636 node might optimize it away and return a real value.
2637 This function must be called with an in-array of proper size. **/
2639 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2641 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2644 /* If this block has no value at pos create a Phi0 and remember it
2645 in graph_arr to break recursions.
2646 Else we may not set graph_arr as there a later value is remembered. */
2648 if (!block->attr.block.graph_arr[pos]) {
2649 if (block == get_irg_start_block(current_ir_graph)) {
2650 /* Collapsing to Bad tarvals is no good idea.
2651 So we call a user-supplied routine here that deals with this case as
2652 appropriate for the given language. Sorrily the only help we can give
2653 here is the position.
2655 Even if all variables are defined before use, it can happen that
2656 we get to the start block, if a Cond has been replaced by a tuple
2657 (bad, jmp). In this case we call the function needlessly, eventually
2658 generating an non existent error.
2659 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2662 if (default_initialize_local_variable) {
2663 ir_node *rem = get_cur_block();
/* The callback may construct IR; make `block` current while it runs
   (`rem` saved so the previous current block can be restored). */
2665 set_cur_block(block);
2666 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* No callback registered: fall back to a Const of tarval_bad. */
2670 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2671 /* We don't need to care about exception ops in the start block.
2672 There are none by definition. */
2673 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 placeholder to break recursion. */
2675 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2676 block->attr.block.graph_arr[pos] = phi0;
2677 #if PRECISE_EXC_CONTEXT
2678 if (get_opt_precise_exc_context()) {
2679 /* Set graph_arr for fragile ops. Also here we should break recursion.
2680 We could choose a cyclic path through an cfop. But the recursion would
2681 break at some point. */
2682 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2688 /* This loop goes to all predecessor blocks of the block the Phi node
2689 is in and there finds the operands of the Phi node by calling
2690 get_r_value_internal. */
2691 for (i = 1; i <= ins; ++i) {
2692 prevCfOp = skip_Proj(block->in[i]);
2694 if (is_Bad(prevCfOp)) {
2695 /* In case a Cond has been optimized we would get right to the start block
2696 with an invalid definition. */
2697 nin[i-1] = new_Bad();
2700 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2702 if (!is_Bad(prevBlock)) {
2703 #if PRECISE_EXC_CONTEXT
/* Fragile (possibly excepting) cfop: ask its frag array for the value
   that is valid on this particular control-flow edge. */
2704 if (get_opt_precise_exc_context() &&
2705 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2706 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2707 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2710 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2712 nin[i-1] = new_Bad();
2716 /* We want to pass the Phi0 node to the constructor: this finds additional
2717 optimization possibilities.
2718 The Phi0 node either is allocated in this function, or it comes from
2719 a former call to get_r_value_internal. In this case we may not yet
2720 exchange phi0, as this is done in mature_immBlock. */
2722 phi0_all = block->attr.block.graph_arr[pos];
2723 if (!((get_irn_op(phi0_all) == op_Phi) &&
2724 (get_irn_arity(phi0_all) == 0) &&
2725 (get_nodes_block(phi0_all) == block)))
2731 /* After collecting all predecessors into the array nin a new Phi node
2732 with these predecessors is created. This constructor contains an
2733 optimization: If all predecessors of the Phi node are identical it
2734 returns the only operand instead of a new Phi node. */
2735 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2737 /* In case we allocated a Phi0 node at the beginning of this procedure,
2738 we need to exchange this Phi0 with the real Phi. */
2740 exchange(phi0, res);
2741 block->attr.block.graph_arr[pos] = res;
2742 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2743 only an optimization. */
2749 /* This function returns the last definition of a variable. In case
2750 this variable was last defined in a previous block, Phi nodes are
2751 inserted. If the part of the firm graph containing the definition
2752 is not yet constructed, a dummy Phi node is returned. */
2754 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2757 /* There are 4 cases to treat.
2759 1. The block is not mature and we visit it the first time. We can not
2760 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2761 predecessors is returned. This node is added to the linked list (field
2762 "link") of the containing block to be completed when this block is
2763 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2766 2. The value is already known in this block, graph_arr[pos] is set and we
2767 visit the block the first time. We can return the value without
2768 creating any new nodes.
2770 3. The block is mature and we visit it the first time. A Phi node needs
2771 to be created (phi_merge). If the Phi is not needed, as all it's
2772 operands are the same value reaching the block through different
2773 paths, it's optimized away and the value itself is returned.
2775 4. The block is mature, and we visit it the second time. Now two
2776 subcases are possible:
2777 * The value was computed completely the last time we were here. This
2778 is the case if there is no loop. We can return the proper value.
2779 * The recursion that visited this node and set the flag did not
2780 return yet. We are computing a value in a loop and need to
2781 break the recursion. This case only happens if we visited
2782 the same block with phi_merge before, which inserted a Phi0.
2783 So we return the Phi0.
2786 /* case 4 -- already visited. */
2787 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2788 /* As phi_merge allocates a Phi0 this value is always defined. Here
2789 is the critical difference of the two algorithms. */
2790 assert(block->attr.block.graph_arr[pos]);
2791 return block->attr.block.graph_arr[pos];
2794 /* visited the first time */
2795 set_irn_visited(block, get_irg_visited(current_ir_graph));
2797 /* Get the local valid value */
2798 res = block->attr.block.graph_arr[pos];
2800 /* case 2 -- If the value is actually computed, return it. */
2801 if (res) { return res; };
2803 if (block->attr.block.matured) { /* case 3 */
2805 /* The Phi has the same amount of ins as the corresponding block. */
2806 int ins = get_irn_arity(block);
2808 NEW_ARR_A (ir_node *, nin, ins);
2810 /* Phi merge collects the predecessors and then creates a node. */
2811 res = phi_merge (block, pos, mode, nin, ins);
2813 } else { /* case 1 */
2814 /* The block is not mature, we don't know how many in's are needed. A Phi
2815 with zero predecessors is created. Such a Phi node is called Phi0
2816 node. The Phi0 is then added to the list of Phi0 nodes in this block
2817 to be matured by mature_immBlock later.
2818 The Phi0 has to remember the pos of it's internal value. If the real
2819 Phi is computed, pos is used to update the array with the local
2821 res = new_rd_Phi0 (current_ir_graph, block, mode);
2822 res->attr.phi0_pos = pos;
2823 res->link = block->link;
2827 /* If we get here, the frontend missed a use-before-definition error */
/* Best-effort recovery: report, then substitute the mode's null value
   so construction can continue (only numeric/pointer modes allowed). */
2830 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2831 assert (mode->code >= irm_F && mode->code <= irm_P);
2832 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2833 get_mode_null(mode));
2836 /* The local valid value is available now. */
2837 block->attr.block.graph_arr[pos] = res;
2842 #endif /* USE_FAST_PHI_CONSTRUCTION */
2844 /* ************************************************************************** */
2847 * Finalize a Block node, when all control flows are known.
2848 * Acceptable parameters are only Block nodes.
2851 mature_immBlock (ir_node *block)
2857 assert (get_irn_opcode(block) == iro_Block);
2858 /* @@@ should be commented in
2859 assert (!get_Block_matured(block) && "Block already matured"); */
2861 if (!get_Block_matured(block)) {
/* Number of control-flow predecessors (dynamic in-array, slot 0 is
   the block itself, hence the -1). */
2862 ins = ARR_LEN (block->in)-1;
2863 /* Fix block parameters */
2864 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2866 /* An array for building the Phi nodes. */
2867 NEW_ARR_A (ir_node *, nin, ins);
2869 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each pending Phi0 on block->link is replaced by the real Phi that
   phi_merge constructs now that all predecessors are known. */
2871 for (n = block->link; n; n=next) {
2872 inc_irg_visited(current_ir_graph);
2874 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2877 block->attr.block.matured = 1;
2879 /* Now, as the block is a finished firm node, we can optimize it.
2880 Since other nodes have been allocated since the block was created
2881 we can not free the node on the obstack. Therefore we have to call
2883 Unfortunately the optimization does not change a lot, as all allocated
2884 nodes refer to the unoptimized node.
2885 We can call _2, as global cse has no effect on blocks. */
2886 block = optimize_in_place_2(block);
2887 IRN_VRFY_IRG(block, current_ir_graph);
/* --- Debug-info constructors (new_d_*) ---------------------------------
   Thin wrappers that construct a node in the current (or start) block of
   current_ir_graph via the corresponding new_bd_* constructor. */
2892 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2894 return new_bd_Phi(db, current_ir_graph->current_block,
/* Constants are placed in the start block, not the current block. */
2899 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2901 return new_bd_Const(db, current_ir_graph->start_block,
2906 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2908 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2912 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2914 return new_bd_Const_type(db, current_ir_graph->start_block,
2920 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2922 return new_bd_Id(db, current_ir_graph->current_block,
2927 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2929 return new_bd_Proj(db, current_ir_graph->current_block,
/* Marks a Cond as "fragmentary" and builds the default Proj for it.
   NOTE(review): `db` is not forwarded here (new_Proj, not new_d_Proj) —
   looks like the debug info is dropped; confirm intended. */
2934 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2937 assert(arg->op == op_Cond);
2938 arg->attr.c.kind = fragmentary;
2939 arg->attr.c.default_proj = max_proj;
2940 res = new_Proj (arg, mode_X, max_proj);
2945 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2947 return new_bd_Conv(db, current_ir_graph->current_block,
2952 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2954 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2958 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2960 return new_bd_Tuple(db, current_ir_graph->current_block,
2965 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2967 return new_bd_Add(db, current_ir_graph->current_block,
2972 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2974 return new_bd_Sub(db, current_ir_graph->current_block,
2980 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
2982 return new_bd_Minus(db, current_ir_graph->current_block,
2987 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2989 return new_bd_Mul(db, current_ir_graph->current_block,
/* Attach a fragment-value array to a freshly built fragile node so that
   precise exception contexts can record per-edge memory values.
   No-op unless the precise-exception option is on, the graph is still
   being built, `res` really is an `op` node, and no array exists yet. */
2994 * allocate the frag array
2996 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2997 if (get_opt_precise_exc_context()) {
2998 if ((current_ir_graph->phase_state == phase_building) &&
2999 (get_irn_op(res) == op) && /* Could be optimized away. */
3000 !*frag_store) /* Could be a cse where the arr is already set. */ {
3001 *frag_store = new_frag_arr(res);
/* --- Debug-info constructors for the remaining operations --------------
   Fragile ops (Quot/DivMod/Div/Mod/Call/Load/Store/Alloc/CopyB/Bound)
   additionally pin the node and, under PRECISE_EXC_CONTEXT, allocate a
   frag array via allocate_frag_arr(). */
3008 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3011 res = new_bd_Quot (db, current_ir_graph->current_block,
3013 res->attr.except.pin_state = op_pin_state_pinned;
3014 #if PRECISE_EXC_CONTEXT
3015 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
3022 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3025 res = new_bd_DivMod (db, current_ir_graph->current_block,
3027 res->attr.except.pin_state = op_pin_state_pinned;
3028 #if PRECISE_EXC_CONTEXT
3029 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
3036 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3039 res = new_bd_Div (db, current_ir_graph->current_block,
3041 res->attr.except.pin_state = op_pin_state_pinned;
3042 #if PRECISE_EXC_CONTEXT
3043 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
3050 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3053 res = new_bd_Mod (db, current_ir_graph->current_block,
3055 res->attr.except.pin_state = op_pin_state_pinned;
3056 #if PRECISE_EXC_CONTEXT
3057 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
3064 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3066 return new_bd_And (db, current_ir_graph->current_block,
3071 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3073 return new_bd_Or (db, current_ir_graph->current_block,
3078 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3080 return new_bd_Eor (db, current_ir_graph->current_block,
3085 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
3087 return new_bd_Not (db, current_ir_graph->current_block,
3092 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3094 return new_bd_Shl (db, current_ir_graph->current_block,
3099 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3101 return new_bd_Shr (db, current_ir_graph->current_block,
3106 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3108 return new_bd_Shrs (db, current_ir_graph->current_block,
3113 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3115 return new_bd_Rot (db, current_ir_graph->current_block,
3120 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3122 return new_bd_Abs (db, current_ir_graph->current_block,
3127 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3129 return new_bd_Cmp (db, current_ir_graph->current_block,
3134 new_d_Jmp (dbg_info *db)
3136 return new_bd_Jmp (db, current_ir_graph->current_block);
3140 new_d_IJmp (dbg_info *db, ir_node *tgt)
3142 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3146 new_d_Cond (dbg_info *db, ir_node *c)
3148 return new_bd_Cond (db, current_ir_graph->current_block, c);
3152 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3156 res = new_bd_Call (db, current_ir_graph->current_block,
3157 store, callee, arity, in, tp);
3158 #if PRECISE_EXC_CONTEXT
3159 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3166 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3168 return new_bd_Return (db, current_ir_graph->current_block,
3173 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3175 return new_bd_Raise (db, current_ir_graph->current_block,
3180 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3183 res = new_bd_Load (db, current_ir_graph->current_block,
3185 #if PRECISE_EXC_CONTEXT
3186 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3193 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3196 res = new_bd_Store (db, current_ir_graph->current_block,
3198 #if PRECISE_EXC_CONTEXT
3199 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3206 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
3210 res = new_bd_Alloc (db, current_ir_graph->current_block,
3211 store, size, alloc_type, where);
3212 #if PRECISE_EXC_CONTEXT
3213 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3220 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3221 ir_node *size, ir_type *free_type, where_alloc where)
3223 return new_bd_Free (db, current_ir_graph->current_block,
3224 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (scalar entity selection). */
3228 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3229 /* GL: objptr was called frame before. Frame was a bad choice for the name
3230 as the operand could as well be a pointer to a dynamic object. */
3232 return new_bd_Sel (db, current_ir_graph->current_block,
3233 store, objptr, 0, NULL, ent);
3237 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3239 return new_bd_Sel (db, current_ir_graph->current_block,
3240 store, objptr, n_index, index, sel);
3244 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
3246 return (new_bd_InstOf (db, current_ir_graph->current_block,
3247 store, objptr, type));
/* SymConsts, like Consts, live in the start block. */
3251 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
3253 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3258 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3260 return new_bd_SymConst (db, current_ir_graph->start_block,
3265 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3267 return new_bd_Sync (db, current_ir_graph->current_block,
3274 return _new_d_Bad();
3278 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3280 return new_bd_Confirm (db, current_ir_graph->current_block,
3285 new_d_Unknown (ir_mode *m)
3287 return new_bd_Unknown(m);
3291 new_d_CallBegin (dbg_info *db, ir_node *call)
3294 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3299 new_d_EndReg (dbg_info *db)
3302 res = new_bd_EndReg(db, current_ir_graph->current_block);
3307 new_d_EndExcept (dbg_info *db)
3310 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3315 new_d_Break (dbg_info *db)
3317 return new_bd_Break (db, current_ir_graph->current_block);
3321 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3323 return new_bd_Filter (db, current_ir_graph->current_block,
3330 return _new_d_NoMem();
3334 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3335 ir_node *ir_true, ir_mode *mode) {
3336 return new_bd_Mux (db, current_ir_graph->current_block,
3337 sel, ir_false, ir_true, mode);
3340 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
3341 ir_node *dst, ir_node *src, ir_type *data_type) {
3343 res = new_bd_CopyB(db, current_ir_graph->current_block,
3344 store, dst, src, data_type);
3345 #if PRECISE_EXC_CONTEXT
3346 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
3351 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
3352 ir_node *idx, ir_node *lower, ir_node *upper) {
3354 res = new_bd_Bound(db, current_ir_graph->current_block,
3355 store, idx, lower, upper);
3356 #if PRECISE_EXC_CONTEXT
3357 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
3362 /* ********************************************************************* */
3363 /* Comfortable interface with automatic Phi node construction. */
3364 /* (Also uses constructors of the interfaces above, except new_Block.) */
3365 /* ********************************************************************* */
3367 /* Block construction */
3368 /* immature Block without predecessors */
/* Constructs an immature (not yet matured) Block with no predecessors
   and makes it the current block. Predecessors are appended later with
   add_immBlock_pred(); mature_immBlock() finalizes it. Only valid
   during graph construction (phase_building). */
3369 ir_node *new_d_immBlock (dbg_info *db) {
3372 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3373 /* creates a new dynamic in-array as length of in is -1 */
3374 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3375 current_ir_graph->current_block = res;
3376 res->attr.block.matured = 0;
3377 res->attr.block.dead = 0;
3378 /* res->attr.block.exc = exc_normal; */
3379 /* res->attr.block.handler_entry = 0; */
3380 res->attr.block.irg = current_ir_graph;
3381 res->attr.block.backedge = NULL;
3382 res->attr.block.in_cg = NULL;
3383 res->attr.block.cg_backedge = NULL;
3384 set_Block_block_visited(res, 0);
3386 /* Create and initialize array for Phi-node construction. */
3387 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3388 current_ir_graph->n_loc);
3389 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3391 /* Immature block may not be optimized! */
3392 IRN_VRFY_IRG(res, current_ir_graph);
3398 new_immBlock (void) {
3399 return new_d_immBlock(NULL);
3402 /* add an edge to a jmp/control flow node */
/* Appends `jmp` (a control-flow node) as a new predecessor of the
   immature `block`. Must not be called after the block was matured. */
3404 add_immBlock_pred (ir_node *block, ir_node *jmp)
3406 if (block->attr.block.matured) {
3407 assert(0 && "Error: Block already matured!\n");
3410 assert(jmp != NULL);
3411 ARR_APP1(ir_node *, block->in, jmp);
3415 /* changing the current block */
3417 set_cur_block (ir_node *target) {
3418 current_ir_graph->current_block = target;
3421 /* ************************ */
3422 /* parameter administration */
3424 /* get a value from the parameter array from the current block by its index */
/* graph_arr slot 0 holds the memory store, so variable `pos` maps to
   slot pos + 1 here and in set_value below. */
3426 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3428 assert(get_irg_phase_state (current_ir_graph) == phase_building)
3429 inc_irg_visited(current_ir_graph);
3431 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3433 /* get a value from the parameter array from the current block by its index */
3435 get_value (int pos, ir_mode *mode)
3437 return get_d_value(NULL, pos, mode);
3440 /* set a value at position pos in the parameter array from the current block */
3442 set_value (int pos, ir_node *value)
3444 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3445 assert(pos+1 < current_ir_graph->n_loc);
3446 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3449 /* get the current store */
/* The store lives in graph_arr slot 0 with mode_M. */
3453 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3454 /* GL: one could call get_value instead */
3455 inc_irg_visited(current_ir_graph);
3456 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3459 /* set the current store */
3461 set_store (ir_node *store)
3463 /* GL: one could call set_value instead */
3464 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3465 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers `ka` as a keep-alive edge on the End node so the node is
   not removed as dead code. */
3469 keep_alive (ir_node *ka) {
3470 add_End_keepalive(current_ir_graph->end, ka);
3473 /* --- Useful access routines --- */
3474 /* Returns the current block of the current graph. To set the current
3475 block use set_cur_block. */
3476 ir_node *get_cur_block(void) {
3477 return get_irg_current_block(current_ir_graph);
3480 /* Returns the frame type of the current graph */
3481 ir_type *get_cur_frame_type(void) {
3482 return get_irg_frame_type(current_ir_graph);
3486 /* ********************************************************************* */
3489 /* call once for each run of the library */
/* Registers the language-dependent callback used by phi_merge when an
   uninitialized local variable is read in the start block. */
3491 init_cons(uninitialized_local_variable_func_t *func)
3493 default_initialize_local_variable = func;
3496 /* call for each graph */
/* Marks one graph as fully constructed (construction interface may no
   longer be used on it). */
3498 irg_finalize_cons (ir_graph *irg) {
3499 irg->phase_state = phase_high;
3503 irp_finalize_cons (void) {
3504 int i, n_irgs = get_irp_n_irgs();
3505 for (i = 0; i < n_irgs; i++) {
3506 irg_finalize_cons(get_irp_irg(i));
3508 irp->phase_state = phase_high;\
/* --- Convenience constructors without debug information ----------------
   Each new_X simply forwards to new_d_X with a NULL dbg_info. */
3514 ir_node *new_Block(int arity, ir_node **in) {
3515 return new_d_Block(NULL, arity, in);
3517 ir_node *new_Start (void) {
3518 return new_d_Start(NULL);
3520 ir_node *new_End (void) {
3521 return new_d_End(NULL);
3523 ir_node *new_Jmp (void) {
3524 return new_d_Jmp(NULL);
3526 ir_node *new_IJmp (ir_node *tgt) {
3527 return new_d_IJmp(NULL, tgt);
3529 ir_node *new_Cond (ir_node *c) {
3530 return new_d_Cond(NULL, c);
3532 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3533 return new_d_Return(NULL, store, arity, in);
3535 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3536 return new_d_Raise(NULL, store, obj);
3538 ir_node *new_Const (ir_mode *mode, tarval *con) {
3539 return new_d_Const(NULL, mode, con);
3542 ir_node *new_Const_long(ir_mode *mode, long value)
3544 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
3547 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3548 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3551 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3552 return new_d_SymConst(NULL, value, kind);
3554 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3555 return new_d_simpleSel(NULL, store, objptr, ent);
3557 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3559 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3561 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3562 return new_d_InstOf (NULL, store, objptr, ent);
3564 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3566 return new_d_Call(NULL, store, callee, arity, in, tp);
3568 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3569 return new_d_Add(NULL, op1, op2, mode);
3571 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3572 return new_d_Sub(NULL, op1, op2, mode);
3574 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3575 return new_d_Minus(NULL, op, mode);
3577 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3578 return new_d_Mul(NULL, op1, op2, mode);
3580 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3581 return new_d_Quot(NULL, memop, op1, op2);
3583 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3584 return new_d_DivMod(NULL, memop, op1, op2);
3586 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3587 return new_d_Div(NULL, memop, op1, op2);
3589 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3590 return new_d_Mod(NULL, memop, op1, op2);
3592 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3593 return new_d_Abs(NULL, op, mode);
3595 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3596 return new_d_And(NULL, op1, op2, mode);
3598 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3599 return new_d_Or(NULL, op1, op2, mode);
3601 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3602 return new_d_Eor(NULL, op1, op2, mode);
3604 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3605 return new_d_Not(NULL, op, mode);
3607 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3608 return new_d_Shl(NULL, op, k, mode);
3610 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3611 return new_d_Shr(NULL, op, k, mode);
3613 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3614 return new_d_Shrs(NULL, op, k, mode);
3616 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3617 return new_d_Rot(NULL, op, k, mode);
3619 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3620 return new_d_Cmp(NULL, op1, op2);
3622 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3623 return new_d_Conv(NULL, op, mode);
3625 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3626 return new_d_Cast(NULL, op, to_tp);
3628 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3629 return new_d_Phi(NULL, arity, in, mode);
3631 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3632 return new_d_Load(NULL, store, addr, mode);
3634 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3635 return new_d_Store(NULL, store, addr, val);
3637 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3638 where_alloc where) {
3639 return new_d_Alloc(NULL, store, size, alloc_type, where);
3641 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3642 ir_type *free_type, where_alloc where) {
3643 return new_d_Free(NULL, store, ptr, size, free_type, where);
3645 ir_node *new_Sync (int arity, ir_node **in) {
3646 return new_d_Sync(NULL, arity, in);
3648 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3649 return new_d_Proj(NULL, arg, mode, proj);
3651 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3652 return new_d_defaultProj(NULL, arg, max_proj);
3654 ir_node *new_Tuple (int arity, ir_node **in) {
3655 return new_d_Tuple(NULL, arity, in);
3657 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3658 return new_d_Id(NULL, val, mode);
3660 ir_node *new_Bad (void) {
3663 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3664 return new_d_Confirm (NULL, val, bound, cmp);
3666 ir_node *new_Unknown(ir_mode *m) {
3667 return new_d_Unknown(m);
3669 ir_node *new_CallBegin (ir_node *callee) {
3670 return new_d_CallBegin(NULL, callee);
3672 ir_node *new_EndReg (void) {
3673 return new_d_EndReg(NULL);
3675 ir_node *new_EndExcept (void) {
3676 return new_d_EndExcept(NULL);
3678 ir_node *new_Break (void) {
3679 return new_d_Break(NULL);
3681 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3682 return new_d_Filter(NULL, arg, mode, proj);
3684 ir_node *new_NoMem (void) {
3685 return new_d_NoMem();
3687 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3688 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3690 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3691 return new_d_CopyB(NULL, store, dst, src, data_type);
3693 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3694 return new_d_Bound(NULL, store, idx, lower, upper);