3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* Constructs a Block with a fixed number of predecessors.
67 Does not set current_block. Can not be used with automatic
68 Phi node construction. */
70 new_bd_Block (dbg_info *db, int arity, ir_node **in)
73 ir_graph *irg = current_ir_graph;
75 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
76 set_Block_matured(res, 1);
77 set_Block_block_visited(res, 0);
79 /* res->attr.block.exc = exc_normal; */
80 /* res->attr.block.handler_entry = 0; */
81 res->attr.block.dead = 0;
82 res->attr.block.irg = irg;
83 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
84 res->attr.block.in_cg = NULL;
85 res->attr.block.cg_backedge = NULL;
86 res->attr.block.extblk = NULL;
88 IRN_VRFY_IRG(res, irg);
93 new_bd_Start (dbg_info *db, ir_node *block)
96 ir_graph *irg = current_ir_graph;
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
106 new_bd_End (dbg_info *db, ir_node *block)
109 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
113 IRN_VRFY_IRG(res, irg);
117 /* Creates a Phi node with all predecessors. Calling this constructor
118 is only allowed if the corresponding block is mature. */
120 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 ir_graph *irg = current_ir_graph;
127 /* Don't assert that block matured: the use of this constructor is strongly
129 if ( get_Block_matured(block) )
130 assert( get_irn_arity(block) == arity );
132 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
134 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
136 for (i = arity-1; i >= 0; i--)
137 if (get_irn_op(in[i]) == op_Unknown) {
142 if (!has_unknown) res = optimize_node (res);
143 IRN_VRFY_IRG(res, irg);
145 /* Memory Phis in endless loops must be kept alive.
146 As we can't distinguish these easily we keep all of them alive. */
147 if ((res->op == op_Phi) && (mode == mode_M))
148 add_End_keepalive(irg->end, res);
153 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
156 ir_graph *irg = current_ir_graph;
158 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
159 res->attr.con.tv = con;
160 set_Const_type(res, tp); /* Call method because of complex assertion. */
161 res = optimize_node (res);
162 assert(get_Const_type(res) == tp);
163 IRN_VRFY_IRG(res, irg);
169 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
171 ir_graph *irg = current_ir_graph;
173 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
177 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
179 ir_graph *irg = current_ir_graph;
181 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
185 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
191 res = optimize_node(res);
192 IRN_VRFY_IRG(res, irg);
197 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
204 res->attr.proj = proj;
207 assert(get_Proj_pred(res));
208 assert(get_nodes_block(get_Proj_pred(res)));
210 res = optimize_node(res);
212 IRN_VRFY_IRG(res, irg);
218 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
222 ir_graph *irg = current_ir_graph;
224 assert(arg->op == op_Cond);
225 arg->attr.c.kind = fragmentary;
226 arg->attr.c.default_proj = max_proj;
227 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
232 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
235 ir_graph *irg = current_ir_graph;
237 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
238 res = optimize_node(res);
239 IRN_VRFY_IRG(res, irg);
244 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
247 ir_graph *irg = current_ir_graph;
249 assert(is_atomic_type(to_tp));
251 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
252 res->attr.cast.totype = to_tp;
253 res = optimize_node(res);
254 IRN_VRFY_IRG(res, irg);
259 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
262 ir_graph *irg = current_ir_graph;
264 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
265 res = optimize_node (res);
266 IRN_VRFY_IRG(res, irg);
271 new_bd_Add (dbg_info *db, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
276 ir_graph *irg = current_ir_graph;
280 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
281 res = optimize_node(res);
282 IRN_VRFY_IRG(res, irg);
287 new_bd_Sub (dbg_info *db, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_graph *irg = current_ir_graph;
296 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
297 res = optimize_node (res);
298 IRN_VRFY_IRG(res, irg);
304 new_bd_Minus (dbg_info *db, ir_node *block,
305 ir_node *op, ir_mode *mode)
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
311 res = optimize_node(res);
312 IRN_VRFY_IRG(res, irg);
317 new_bd_Mul (dbg_info *db, ir_node *block,
318 ir_node *op1, ir_node *op2, ir_mode *mode)
322 ir_graph *irg = current_ir_graph;
326 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
327 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
333 new_bd_Quot (dbg_info *db, ir_node *block,
334 ir_node *memop, ir_node *op1, ir_node *op2)
338 ir_graph *irg = current_ir_graph;
343 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
344 res = optimize_node(res);
345 IRN_VRFY_IRG(res, irg);
350 new_bd_DivMod (dbg_info *db, ir_node *block,
351 ir_node *memop, ir_node *op1, ir_node *op2)
355 ir_graph *irg = current_ir_graph;
360 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Div (dbg_info *db, ir_node *block,
368 ir_node *memop, ir_node *op1, ir_node *op2)
372 ir_graph *irg = current_ir_graph;
377 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
378 res = optimize_node(res);
379 IRN_VRFY_IRG(res, irg);
384 new_bd_Mod (dbg_info *db, ir_node *block,
385 ir_node *memop, ir_node *op1, ir_node *op2)
389 ir_graph *irg = current_ir_graph;
394 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
395 res = optimize_node(res);
396 IRN_VRFY_IRG(res, irg);
401 new_bd_And (dbg_info *db, ir_node *block,
402 ir_node *op1, ir_node *op2, ir_mode *mode)
406 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
411 res = optimize_node(res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_Or (dbg_info *db, ir_node *block,
418 ir_node *op1, ir_node *op2, ir_mode *mode)
422 ir_graph *irg = current_ir_graph;
426 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
427 res = optimize_node(res);
428 IRN_VRFY_IRG(res, irg);
433 new_bd_Eor (dbg_info *db, ir_node *block,
434 ir_node *op1, ir_node *op2, ir_mode *mode)
438 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
443 res = optimize_node (res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_Not (dbg_info *db, ir_node *block,
450 ir_node *op, ir_mode *mode)
453 ir_graph *irg = current_ir_graph;
455 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
456 res = optimize_node(res);
457 IRN_VRFY_IRG(res, irg);
462 new_bd_Shl (dbg_info *db, ir_node *block,
463 ir_node *op, ir_node *k, ir_mode *mode)
467 ir_graph *irg = current_ir_graph;
471 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Shr (dbg_info *db, ir_node *block,
479 ir_node *op, ir_node *k, ir_mode *mode)
483 ir_graph *irg = current_ir_graph;
487 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_bd_Shrs (dbg_info *db, ir_node *block,
495 ir_node *op, ir_node *k, ir_mode *mode)
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
504 res = optimize_node(res);
505 IRN_VRFY_IRG(res, irg);
510 new_bd_Rot (dbg_info *db, ir_node *block,
511 ir_node *op, ir_node *k, ir_mode *mode)
515 ir_graph *irg = current_ir_graph;
519 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
520 res = optimize_node(res);
521 IRN_VRFY_IRG(res, irg);
526 new_bd_Abs (dbg_info *db, ir_node *block,
527 ir_node *op, ir_mode *mode)
530 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
533 res = optimize_node (res);
534 IRN_VRFY_IRG(res, irg);
539 new_bd_Cmp (dbg_info *db, ir_node *block,
540 ir_node *op1, ir_node *op2)
544 ir_graph *irg = current_ir_graph;
549 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
556 new_bd_Jmp (dbg_info *db, ir_node *block)
559 ir_graph *irg = current_ir_graph;
561 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
562 res = optimize_node (res);
563 IRN_VRFY_IRG (res, irg);
568 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
571 ir_graph *irg = current_ir_graph;
573 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
574 res = optimize_node (res);
575 IRN_VRFY_IRG (res, irg);
577 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
583 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
586 ir_graph *irg = current_ir_graph;
588 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
589 res->attr.c.kind = dense;
590 res->attr.c.default_proj = 0;
591 res->attr.c.pred = COND_JMP_PRED_NONE;
592 res = optimize_node (res);
593 IRN_VRFY_IRG(res, irg);
598 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
599 ir_node *callee, int arity, ir_node **in, ir_type *tp)
604 ir_graph *irg = current_ir_graph;
607 NEW_ARR_A(ir_node *, r_in, r_arity);
610 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
612 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
614 assert((get_unknown_type() == tp) || is_Method_type(tp));
615 set_Call_type(res, tp);
616 res->attr.call.exc.pin_state = op_pin_state_pinned;
617 res->attr.call.callee_arr = NULL;
618 res = optimize_node(res);
619 IRN_VRFY_IRG(res, irg);
624 new_bd_Return (dbg_info *db, ir_node *block,
625 ir_node *store, int arity, ir_node **in)
630 ir_graph *irg = current_ir_graph;
633 NEW_ARR_A (ir_node *, r_in, r_arity);
635 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
636 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
658 new_bd_Load (dbg_info *db, ir_node *block,
659 ir_node *store, ir_node *adr, ir_mode *mode)
663 ir_graph *irg = current_ir_graph;
667 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
668 res->attr.load.exc.pin_state = op_pin_state_pinned;
669 res->attr.load.load_mode = mode;
670 res->attr.load.volatility = volatility_non_volatile;
671 res = optimize_node(res);
672 IRN_VRFY_IRG(res, irg);
677 new_bd_Store (dbg_info *db, ir_node *block,
678 ir_node *store, ir_node *adr, ir_node *val)
682 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
688 res->attr.store.exc.pin_state = op_pin_state_pinned;
689 res->attr.store.volatility = volatility_non_volatile;
690 res = optimize_node(res);
691 IRN_VRFY_IRG(res, irg);
696 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
697 ir_node *size, ir_type *alloc_type, where_alloc where)
701 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
706 res->attr.a.exc.pin_state = op_pin_state_pinned;
707 res->attr.a.where = where;
708 res->attr.a.type = alloc_type;
709 res = optimize_node(res);
710 IRN_VRFY_IRG(res, irg);
715 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
716 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
720 ir_graph *irg = current_ir_graph;
725 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
726 res->attr.f.where = where;
727 res->attr.f.type = free_type;
728 res = optimize_node(res);
729 IRN_VRFY_IRG(res, irg);
734 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
735 int arity, ir_node **in, entity *ent)
740 ir_graph *irg = current_ir_graph;
742 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
745 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
748 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
750 * FIXM: Sel's can select functions which should be of mode mode_P_code.
752 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
753 res->attr.s.ent = ent;
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
760 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
761 ir_node *objptr, ir_type *type)
766 ir_graph *irg = current_ir_graph;
769 NEW_ARR_A(ir_node *, r_in, r_arity);
773 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
774 res->attr.io.type = type;
776 /* res = optimize(res); */
777 IRN_VRFY_IRG(res, irg);
782 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
783 symconst_kind symkind, ir_type *tp) {
786 ir_graph *irg = current_ir_graph;
788 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
789 mode = mode_P_data; /* FIXME: can be mode_P_code */
793 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
795 res->attr.i.num = symkind;
796 res->attr.i.sym = value;
799 res = optimize_node(res);
800 IRN_VRFY_IRG(res, irg);
805 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
806 symconst_kind symkind)
808 ir_graph *irg = current_ir_graph;
810 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
815 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
818 ir_graph *irg = current_ir_graph;
820 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
827 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
829 ir_node *in[2], *res;
830 ir_graph *irg = current_ir_graph;
834 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
835 res->attr.confirm_cmp = cmp;
836 res = optimize_node (res);
837 IRN_VRFY_IRG(res, irg);
841 /* this function is often called with current_ir_graph unset */
843 new_bd_Unknown (ir_mode *m)
846 ir_graph *irg = current_ir_graph;
848 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
849 res = optimize_node(res);
854 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
858 ir_graph *irg = current_ir_graph;
860 in[0] = get_Call_ptr(call);
861 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
862 /* res->attr.callbegin.irg = irg; */
863 res->attr.callbegin.call = call;
864 res = optimize_node(res);
865 IRN_VRFY_IRG(res, irg);
870 new_bd_EndReg (dbg_info *db, ir_node *block)
873 ir_graph *irg = current_ir_graph;
875 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
877 IRN_VRFY_IRG(res, irg);
882 new_bd_EndExcept (dbg_info *db, ir_node *block)
885 ir_graph *irg = current_ir_graph;
887 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
888 irg->end_except = res;
889 IRN_VRFY_IRG (res, irg);
894 new_bd_Break (dbg_info *db, ir_node *block)
897 ir_graph *irg = current_ir_graph;
899 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
900 res = optimize_node(res);
901 IRN_VRFY_IRG(res, irg);
906 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
910 ir_graph *irg = current_ir_graph;
912 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
913 res->attr.filter.proj = proj;
914 res->attr.filter.in_cg = NULL;
915 res->attr.filter.backedge = NULL;
918 assert(get_Proj_pred(res));
919 assert(get_nodes_block(get_Proj_pred(res)));
921 res = optimize_node(res);
922 IRN_VRFY_IRG(res, irg);
927 new_bd_Mux (dbg_info *db, ir_node *block,
928 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
932 ir_graph *irg = current_ir_graph;
938 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
941 res = optimize_node(res);
942 IRN_VRFY_IRG(res, irg);
947 new_bd_CopyB (dbg_info *db, ir_node *block,
948 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
952 ir_graph *irg = current_ir_graph;
958 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
960 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
961 res->attr.copyb.data_type = data_type;
962 res = optimize_node(res);
963 IRN_VRFY_IRG(res, irg);
968 new_bd_Bound (dbg_info *db, ir_node *block,
969 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
973 ir_graph *irg = current_ir_graph;
980 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
982 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
983 res = optimize_node(res);
984 IRN_VRFY_IRG(res, irg);
989 new_bd_Keep(dbg_info *db, ir_node *block, int n, ir_node *in[])
991 ir_graph *irg = current_ir_graph;
994 res = new_ir_node(db, irg, block, op_Keep, mode_ANY, n, in);
997 res = optimize_node(res);
998 IRN_VRFY_IRG(res, irg);
1002 /* --------------------------------------------- */
1003 /* private interfaces, for professional use only */
1004 /* --------------------------------------------- */
1006 /* Constructs a Block with a fixed number of predecessors.
1007 Does not set current_block. Can not be used with automatic
1008 Phi node construction. */
1010 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
1012 ir_graph *rem = current_ir_graph;
1015 current_ir_graph = irg;
1016 res = new_bd_Block (db, arity, in);
1017 current_ir_graph = rem;
1023 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
1025 ir_graph *rem = current_ir_graph;
1028 current_ir_graph = irg;
1029 res = new_bd_Start (db, block);
1030 current_ir_graph = rem;
1036 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
1039 ir_graph *rem = current_ir_graph;
1041 current_ir_graph = rem;
1042 res = new_bd_End (db, block);
1043 current_ir_graph = rem;
1048 /* Creates a Phi node with all predecessors. Calling this constructor
1049 is only allowed if the corresponding block is mature. */
1051 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
1054 ir_graph *rem = current_ir_graph;
1056 current_ir_graph = irg;
1057 res = new_bd_Phi (db, block,arity, in, mode);
1058 current_ir_graph = rem;
1064 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
1067 ir_graph *rem = current_ir_graph;
1069 current_ir_graph = irg;
1070 res = new_bd_Const_type (db, block, mode, con, tp);
1071 current_ir_graph = rem;
1077 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
1080 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
1084 current_ir_graph = rem;
1090 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
1092 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
1096 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
1099 ir_graph *rem = current_ir_graph;
1101 current_ir_graph = irg;
1102 res = new_bd_Id(db, block, val, mode);
1103 current_ir_graph = rem;
1109 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1113 ir_graph *rem = current_ir_graph;
1115 current_ir_graph = irg;
1116 res = new_bd_Proj(db, block, arg, mode, proj);
1117 current_ir_graph = rem;
1123 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1127 ir_graph *rem = current_ir_graph;
1129 current_ir_graph = irg;
1130 res = new_bd_defaultProj(db, block, arg, max_proj);
1131 current_ir_graph = rem;
1137 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
1140 ir_graph *rem = current_ir_graph;
1142 current_ir_graph = irg;
1143 res = new_bd_Conv(db, block, op, mode);
1144 current_ir_graph = rem;
1150 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1153 ir_graph *rem = current_ir_graph;
1155 current_ir_graph = irg;
1156 res = new_bd_Cast(db, block, op, to_tp);
1157 current_ir_graph = rem;
1163 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1166 ir_graph *rem = current_ir_graph;
1168 current_ir_graph = irg;
1169 res = new_bd_Tuple(db, block, arity, in);
1170 current_ir_graph = rem;
1176 new_rd_Add (dbg_info *db, ir_graph *irg, ir_node *block,
1177 ir_node *op1, ir_node *op2, ir_mode *mode)
1180 ir_graph *rem = current_ir_graph;
1182 current_ir_graph = irg;
1183 res = new_bd_Add(db, block, op1, op2, mode);
1184 current_ir_graph = rem;
1190 new_rd_Sub (dbg_info *db, ir_graph *irg, ir_node *block,
1191 ir_node *op1, ir_node *op2, ir_mode *mode)
1194 ir_graph *rem = current_ir_graph;
1196 current_ir_graph = irg;
1197 res = new_bd_Sub(db, block, op1, op2, mode);
1198 current_ir_graph = rem;
1204 new_rd_Minus (dbg_info *db, ir_graph *irg, ir_node *block,
1205 ir_node *op, ir_mode *mode)
1208 ir_graph *rem = current_ir_graph;
1210 current_ir_graph = irg;
1211 res = new_bd_Minus(db, block, op, mode);
1212 current_ir_graph = rem;
1218 new_rd_Mul (dbg_info *db, ir_graph *irg, ir_node *block,
1219 ir_node *op1, ir_node *op2, ir_mode *mode)
1222 ir_graph *rem = current_ir_graph;
1224 current_ir_graph = irg;
1225 res = new_bd_Mul(db, block, op1, op2, mode);
1226 current_ir_graph = rem;
1232 new_rd_Quot (dbg_info *db, ir_graph *irg, ir_node *block,
1233 ir_node *memop, ir_node *op1, ir_node *op2)
1236 ir_graph *rem = current_ir_graph;
1238 current_ir_graph = irg;
1239 res = new_bd_Quot(db, block, memop, op1, op2);
1240 current_ir_graph = rem;
1246 new_rd_DivMod (dbg_info *db, ir_graph *irg, ir_node *block,
1247 ir_node *memop, ir_node *op1, ir_node *op2)
1250 ir_graph *rem = current_ir_graph;
1252 current_ir_graph = irg;
1253 res = new_bd_DivMod(db, block, memop, op1, op2);
1254 current_ir_graph = rem;
1260 new_rd_Div (dbg_info *db, ir_graph *irg, ir_node *block,
1261 ir_node *memop, ir_node *op1, ir_node *op2)
1264 ir_graph *rem = current_ir_graph;
1266 current_ir_graph = irg;
1267 res = new_bd_Div (db, block, memop, op1, op2);
1268 current_ir_graph =rem;
1274 new_rd_Mod (dbg_info *db, ir_graph *irg, ir_node *block,
1275 ir_node *memop, ir_node *op1, ir_node *op2)
1278 ir_graph *rem = current_ir_graph;
1280 current_ir_graph = irg;
1281 res = new_bd_Mod(db, block, memop, op1, op2);
1282 current_ir_graph = rem;
1288 new_rd_And (dbg_info *db, ir_graph *irg, ir_node *block,
1289 ir_node *op1, ir_node *op2, ir_mode *mode)
1292 ir_graph *rem = current_ir_graph;
1294 current_ir_graph = irg;
1295 res = new_bd_And(db, block, op1, op2, mode);
1296 current_ir_graph = rem;
1302 new_rd_Or (dbg_info *db, ir_graph *irg, ir_node *block,
1303 ir_node *op1, ir_node *op2, ir_mode *mode)
1306 ir_graph *rem = current_ir_graph;
1308 current_ir_graph = irg;
1309 res = new_bd_Or(db, block, op1, op2, mode);
1310 current_ir_graph = rem;
1316 new_rd_Eor (dbg_info *db, ir_graph *irg, ir_node *block,
1317 ir_node *op1, ir_node *op2, ir_mode *mode)
1320 ir_graph *rem = current_ir_graph;
1322 current_ir_graph = irg;
1323 res = new_bd_Eor(db, block, op1, op2, mode);
1324 current_ir_graph = rem;
1330 new_rd_Not (dbg_info *db, ir_graph *irg, ir_node *block,
1331 ir_node *op, ir_mode *mode)
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Not(db, block, op, mode);
1338 current_ir_graph = rem;
1344 new_rd_Shl (dbg_info *db, ir_graph *irg, ir_node *block,
1345 ir_node *op, ir_node *k, ir_mode *mode)
1348 ir_graph *rem = current_ir_graph;
1350 current_ir_graph = irg;
1351 res = new_bd_Shl (db, block, op, k, mode);
1352 current_ir_graph = rem;
1358 new_rd_Shr (dbg_info *db, ir_graph *irg, ir_node *block,
1359 ir_node *op, ir_node *k, ir_mode *mode)
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Shr(db, block, op, k, mode);
1366 current_ir_graph = rem;
1372 new_rd_Shrs (dbg_info *db, ir_graph *irg, ir_node *block,
1373 ir_node *op, ir_node *k, ir_mode *mode)
1376 ir_graph *rem = current_ir_graph;
1378 current_ir_graph = irg;
1379 res = new_bd_Shrs(db, block, op, k, mode);
1380 current_ir_graph = rem;
1386 new_rd_Rot (dbg_info *db, ir_graph *irg, ir_node *block,
1387 ir_node *op, ir_node *k, ir_mode *mode)
1390 ir_graph *rem = current_ir_graph;
1392 current_ir_graph = irg;
1393 res = new_bd_Rot(db, block, op, k, mode);
1394 current_ir_graph = rem;
1400 new_rd_Abs (dbg_info *db, ir_graph *irg, ir_node *block,
1401 ir_node *op, ir_mode *mode)
1404 ir_graph *rem = current_ir_graph;
1406 current_ir_graph = irg;
1407 res = new_bd_Abs(db, block, op, mode);
1408 current_ir_graph = rem;
1414 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1415 ir_node *op1, ir_node *op2)
1418 ir_graph *rem = current_ir_graph;
1420 current_ir_graph = irg;
1421 res = new_bd_Cmp(db, block, op1, op2);
1422 current_ir_graph = rem;
1428 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1431 ir_graph *rem = current_ir_graph;
1433 current_ir_graph = irg;
1434 res = new_bd_Jmp(db, block);
1435 current_ir_graph = rem;
1441 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1444 ir_graph *rem = current_ir_graph;
1446 current_ir_graph = irg;
1447 res = new_bd_IJmp(db, block, tgt);
1448 current_ir_graph = rem;
1454 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1457 ir_graph *rem = current_ir_graph;
1459 current_ir_graph = irg;
1460 res = new_bd_Cond(db, block, c);
1461 current_ir_graph = rem;
1467 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1468 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1471 ir_graph *rem = current_ir_graph;
1473 current_ir_graph = irg;
1474 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1475 current_ir_graph = rem;
1481 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1482 ir_node *store, int arity, ir_node **in)
1485 ir_graph *rem = current_ir_graph;
1487 current_ir_graph = irg;
1488 res = new_bd_Return(db, block, store, arity, in);
1489 current_ir_graph = rem;
1495 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1498 ir_graph *rem = current_ir_graph;
1500 current_ir_graph = irg;
1501 res = new_bd_Raise(db, block, store, obj);
1502 current_ir_graph = rem;
1508 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1509 ir_node *store, ir_node *adr, ir_mode *mode)
1512 ir_graph *rem = current_ir_graph;
1514 current_ir_graph = irg;
1515 res = new_bd_Load(db, block, store, adr, mode);
1516 current_ir_graph = rem;
1522 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1523 ir_node *store, ir_node *adr, ir_node *val)
1526 ir_graph *rem = current_ir_graph;
1528 current_ir_graph = irg;
1529 res = new_bd_Store(db, block, store, adr, val);
1530 current_ir_graph = rem;
1536 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1537 ir_node *size, ir_type *alloc_type, where_alloc where)
1540 ir_graph *rem = current_ir_graph;
1542 current_ir_graph = irg;
1543 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1544 current_ir_graph = rem;
1550 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1551 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1554 ir_graph *rem = current_ir_graph;
1556 current_ir_graph = irg;
1557 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1558 current_ir_graph = rem;
1564 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1565 ir_node *store, ir_node *objptr, entity *ent)
1568 ir_graph *rem = current_ir_graph;
1570 current_ir_graph = irg;
1571 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1572 current_ir_graph = rem;
1578 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1579 int arity, ir_node **in, entity *ent)
1582 ir_graph *rem = current_ir_graph;
1584 current_ir_graph = irg;
1585 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1586 current_ir_graph = rem;
1592 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1593 ir_node *objptr, ir_type *type)
1596 ir_graph *rem = current_ir_graph;
1598 current_ir_graph = irg;
1599 res = new_bd_InstOf(db, block, store, objptr, type);
1600 current_ir_graph = rem;
1606 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1607 symconst_kind symkind, ir_type *tp)
1610 ir_graph *rem = current_ir_graph;
1612 current_ir_graph = irg;
1613 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1614 current_ir_graph = rem;
1620 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1621 symconst_kind symkind)
1623 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1627 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1629 symconst_symbol sym = {(ir_type *)symbol};
1630 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1633 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1634 symconst_symbol sym = {(ir_type *)symbol};
1635 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1638 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1639 symconst_symbol sym = {symbol};
1640 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1643 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1644 symconst_symbol sym = {symbol};
1645 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1649 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1652 ir_graph *rem = current_ir_graph;
1654 current_ir_graph = irg;
1655 res = new_bd_Sync(db, block, arity, in);
1656 current_ir_graph = rem;
1662 new_rd_Bad (ir_graph *irg)
1668 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1671 ir_graph *rem = current_ir_graph;
1673 current_ir_graph = irg;
1674 res = new_bd_Confirm(db, block, val, bound, cmp);
1675 current_ir_graph = rem;
1680 /* this function is often called with current_ir_graph unset */
1682 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1685 ir_graph *rem = current_ir_graph;
1687 current_ir_graph = irg;
1688 res = new_bd_Unknown(m);
1689 current_ir_graph = rem;
1695 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1698 ir_graph *rem = current_ir_graph;
1700 current_ir_graph = irg;
1701 res = new_bd_CallBegin(db, block, call);
1702 current_ir_graph = rem;
1708 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1712 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1714 IRN_VRFY_IRG(res, irg);
1719 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1723 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1724 irg->end_except = res;
1725 IRN_VRFY_IRG (res, irg);
1730 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1733 ir_graph *rem = current_ir_graph;
1735 current_ir_graph = irg;
1736 res = new_bd_Break(db, block);
1737 current_ir_graph = rem;
1743 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1747 ir_graph *rem = current_ir_graph;
1749 current_ir_graph = irg;
1750 res = new_bd_Filter(db, block, arg, mode, proj);
1751 current_ir_graph = rem;
1757 new_rd_NoMem (ir_graph *irg) {
1762 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1763 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1766 ir_graph *rem = current_ir_graph;
1768 current_ir_graph = irg;
1769 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1770 current_ir_graph = rem;
1775 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1776 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1779 ir_graph *rem = current_ir_graph;
1781 current_ir_graph = irg;
1782 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1783 current_ir_graph = rem;
1788 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1789 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1792 ir_graph *rem = current_ir_graph;
1794 current_ir_graph = irg;
1795 res = new_bd_Bound(db, block, store, idx, lower, upper);
1796 current_ir_graph = rem;
1801 ir_node *new_rd_Keep(dbg_info *db, ir_graph *irg, ir_node *block, int n, ir_node *in[])
1804 ir_graph *rem = current_ir_graph;
1806 current_ir_graph = irg;
1807 res = new_bd_Keep(db, block, n, in);
1808 current_ir_graph = rem;
1813 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1814 return new_rd_Block(NULL, irg, arity, in);
1816 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1817 return new_rd_Start(NULL, irg, block);
1819 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1820 return new_rd_End(NULL, irg, block);
1822 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1823 return new_rd_Jmp(NULL, irg, block);
1825 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1826 return new_rd_IJmp(NULL, irg, block, tgt);
1828 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1829 return new_rd_Cond(NULL, irg, block, c);
1831 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1832 ir_node *store, int arity, ir_node **in) {
1833 return new_rd_Return(NULL, irg, block, store, arity, in);
1835 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1836 ir_node *store, ir_node *obj) {
1837 return new_rd_Raise(NULL, irg, block, store, obj);
1839 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1840 ir_mode *mode, tarval *con) {
1841 return new_rd_Const(NULL, irg, block, mode, con);
1844 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1845 ir_mode *mode, long value) {
1846 return new_rd_Const_long(NULL, irg, block, mode, value);
1849 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1850 ir_mode *mode, tarval *con, ir_type *tp) {
1851 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1854 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1855 symconst_symbol value, symconst_kind symkind) {
1856 return new_rd_SymConst(NULL, irg, block, value, symkind);
1858 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1859 ir_node *objptr, int n_index, ir_node **index,
1861 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1863 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1865 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1867 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1868 ir_node *callee, int arity, ir_node **in,
1870 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1872 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1873 ir_node *op1, ir_node *op2, ir_mode *mode) {
1874 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1876 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1877 ir_node *op1, ir_node *op2, ir_mode *mode) {
1878 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1880 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1881 ir_node *op, ir_mode *mode) {
1882 return new_rd_Minus(NULL, irg, block, op, mode);
1884 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1885 ir_node *op1, ir_node *op2, ir_mode *mode) {
1886 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1888 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1889 ir_node *memop, ir_node *op1, ir_node *op2) {
1890 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1892 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1893 ir_node *memop, ir_node *op1, ir_node *op2) {
1894 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1896 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1897 ir_node *memop, ir_node *op1, ir_node *op2) {
1898 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1900 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1901 ir_node *memop, ir_node *op1, ir_node *op2) {
1902 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1904 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1905 ir_node *op, ir_mode *mode) {
1906 return new_rd_Abs(NULL, irg, block, op, mode);
1908 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1909 ir_node *op1, ir_node *op2, ir_mode *mode) {
1910 return new_rd_And(NULL, irg, block, op1, op2, mode);
1912 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1913 ir_node *op1, ir_node *op2, ir_mode *mode) {
1914 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1916 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1917 ir_node *op1, ir_node *op2, ir_mode *mode) {
1918 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1920 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1921 ir_node *op, ir_mode *mode) {
1922 return new_rd_Not(NULL, irg, block, op, mode);
1924 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1925 ir_node *op1, ir_node *op2) {
1926 return new_rd_Cmp(NULL, irg, block, op1, op2);
1928 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1929 ir_node *op, ir_node *k, ir_mode *mode) {
1930 return new_rd_Shl(NULL, irg, block, op, k, mode);
1932 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1933 ir_node *op, ir_node *k, ir_mode *mode) {
1934 return new_rd_Shr(NULL, irg, block, op, k, mode);
1936 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1937 ir_node *op, ir_node *k, ir_mode *mode) {
1938 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1940 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1941 ir_node *op, ir_node *k, ir_mode *mode) {
1942 return new_rd_Rot(NULL, irg, block, op, k, mode);
1944 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1945 ir_node *op, ir_mode *mode) {
1946 return new_rd_Conv(NULL, irg, block, op, mode);
1948 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1949 return new_rd_Cast(NULL, irg, block, op, to_tp);
1951 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1952 ir_node **in, ir_mode *mode) {
1953 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1955 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1956 ir_node *store, ir_node *adr, ir_mode *mode) {
1957 return new_rd_Load(NULL, irg, block, store, adr, mode);
1959 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1960 ir_node *store, ir_node *adr, ir_node *val) {
1961 return new_rd_Store(NULL, irg, block, store, adr, val);
1963 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1964 ir_node *size, ir_type *alloc_type, where_alloc where) {
1965 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1967 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1968 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1969 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1971 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1972 return new_rd_Sync(NULL, irg, block, arity, in);
1974 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1975 ir_mode *mode, long proj) {
1976 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1978 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1980 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1982 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1983 int arity, ir_node **in) {
1984 return new_rd_Tuple(NULL, irg, block, arity, in );
1986 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1987 ir_node *val, ir_mode *mode) {
1988 return new_rd_Id(NULL, irg, block, val, mode);
1990 ir_node *new_r_Bad (ir_graph *irg) {
1991 return new_rd_Bad(irg);
1993 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1994 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1996 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1997 return new_rd_Unknown(irg, m);
1999 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
2000 return new_rd_CallBegin(NULL, irg, block, callee);
2002 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
2003 return new_rd_EndReg(NULL, irg, block);
2005 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
2006 return new_rd_EndExcept(NULL, irg, block);
2008 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
2009 return new_rd_Break(NULL, irg, block);
2011 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
2012 ir_mode *mode, long proj) {
2013 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
2015 ir_node *new_r_NoMem (ir_graph *irg) {
2016 return new_rd_NoMem(irg);
2018 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
2019 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2020 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
2022 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
2023 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
2024 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
2026 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
2027 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
2028 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
2031 ir_node *new_r_Keep(ir_graph *irg, ir_node *block,
2032 int n, ir_node *in[])
2034 return new_rd_Keep(NULL, irg, block, n, in);
2037 /** ********************/
2038 /** public interfaces */
2039 /** construction tools */
2043 * - create a new Start node in the current block
2045 * @return s - pointer to the created Start node
2050 new_d_Start (dbg_info *db)
2054 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
2055 op_Start, mode_T, 0, NULL);
2056 /* res->attr.start.irg = current_ir_graph; */
2058 res = optimize_node(res);
2059 IRN_VRFY_IRG(res, current_ir_graph);
2064 new_d_End (dbg_info *db)
2067 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
2068 op_End, mode_X, -1, NULL);
2069 res = optimize_node(res);
2070 IRN_VRFY_IRG(res, current_ir_graph);
2075 /* Constructs a Block with a fixed number of predecessors.
2076 Does set current_block. Can be used with automatic Phi
2077 node construction. */
2079 new_d_Block (dbg_info *db, int arity, ir_node **in)
2083 int has_unknown = 0;
2085 res = new_bd_Block(db, arity, in);
2087 /* Create and initialize array for Phi-node construction. */
2088 if (get_irg_phase_state(current_ir_graph) == phase_building) {
2089 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2090 current_ir_graph->n_loc);
2091 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2094 for (i = arity-1; i >= 0; i--)
2095 if (get_irn_op(in[i]) == op_Unknown) {
2100 if (!has_unknown) res = optimize_node(res);
2101 current_ir_graph->current_block = res;
2103 IRN_VRFY_IRG(res, current_ir_graph);
2108 /* ***********************************************************************/
2109 /* Methods necessary for automatic Phi node creation */
2111 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2112 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2113 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2114 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2116 Call Graph: ( A ---> B == A "calls" B)
2118 get_value mature_immBlock
2126 get_r_value_internal |
2130 new_rd_Phi0 new_rd_Phi_in
2132 * *************************************************************************** */
2134 /** Creates a Phi node with 0 predecessors */
2135 static INLINE ir_node *
2136 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
2140 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
2141 IRN_VRFY_IRG(res, irg);
2145 /* There are two implementations of the Phi node construction. The first
2146 is faster, but does not work for blocks with more than 2 predecessors.
2147 The second works always but is slower and causes more unnecessary Phi
2149 Select the implementations by the following preprocessor flag set in
2151 #if USE_FAST_PHI_CONSTRUCTION
2153 /* This is a stack used for allocating and deallocating nodes in
2154 new_rd_Phi_in. The original implementation used the obstack
2155 to model this stack, now it is explicit. This reduces side effects.
2157 #if USE_EXPLICIT_PHI_IN_STACK
2159 new_Phi_in_stack(void) {
2162 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
2164 res->stack = NEW_ARR_F (ir_node *, 0);
2171 free_Phi_in_stack(Phi_in_stack *s) {
2172 DEL_ARR_F(s->stack);
2176 free_to_Phi_in_stack(ir_node *phi) {
2177 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
2178 current_ir_graph->Phi_in_stack->pos)
2179 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
2181 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
2183 (current_ir_graph->Phi_in_stack->pos)++;
2186 static INLINE ir_node *
2187 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
2188 int arity, ir_node **in) {
2190 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
2191 int pos = current_ir_graph->Phi_in_stack->pos;
2195 /* We need to allocate a new node */
2196 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
2197 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
2199 /* reuse the old node and initialize it again. */
2202 assert (res->kind == k_ir_node);
2203 assert (res->op == op_Phi);
2207 assert (arity >= 0);
2208 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
2209 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
2211 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
2213 (current_ir_graph->Phi_in_stack->pos)--;
2217 #endif /* USE_EXPLICIT_PHI_IN_STACK */
2219 /* Creates a Phi node with a given, fixed array **in of predecessors.
2220 If the Phi node is unnecessary, as the same value reaches the block
2221 through all control flow paths, it is eliminated and the value
2222 returned directly. This constructor is only intended for use in
2223 the automatic Phi node generation triggered by get_value or mature.
2224 The implementation is quite tricky and depends on the fact, that
2225 the nodes are allocated on a stack:
2226 The in array contains predecessors and NULLs. The NULLs appear,
2227 if get_r_value_internal, that computed the predecessors, reached
2228 the same block on two paths. In this case the same value reaches
2229 this block on both paths, there is no definition in between. We need
2230 not allocate a Phi where these path's merge, but we have to communicate
2231 this fact to the caller. This happens by returning a pointer to the
2232 node the caller _will_ allocate. (Yes, we predict the address. We can
2233 do so because the nodes are allocated on the obstack.) The caller then
2234 finds a pointer to itself and, when this routine is called again,
2237 static INLINE ir_node *
2238 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
2241 ir_node *res, *known;
2243 /* Allocate a new node on the obstack. This can return a node to
2244 which some of the pointers in the in-array already point.
2245 Attention: the constructor copies the in array, i.e., the later
2246 changes to the array in this routine do not affect the
2247 constructed node! If the in array contains NULLs, there will be
2248 missing predecessors in the returned node. Is this a possible
2249 internal state of the Phi node generation? */
2250 #if USE_EXPLICIT_PHI_IN_STACK
2251 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
2253 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2254 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2257 /* The in-array can contain NULLs. These were returned by
2258 get_r_value_internal if it reached the same block/definition on a
2259 second path. The NULLs are replaced by the node itself to
2260 simplify the test in the next loop. */
2261 for (i = 0; i < ins; ++i) {
2266 /* This loop checks whether the Phi has more than one predecessor.
2267 If so, it is a real Phi node and we break the loop. Else the Phi
2268 node merges the same definition on several paths and therefore is
2270 for (i = 0; i < ins; ++i) {
2271 if (in[i] == res || in[i] == known)
2280 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2282 #if USE_EXPLICIT_PHI_IN_STACK
2283 free_to_Phi_in_stack(res);
2285 edges_node_deleted(res, current_ir_graph);
2286 obstack_free(current_ir_graph->obst, res);
2290 res = optimize_node (res);
2291 IRN_VRFY_IRG(res, irg);
2294 /* return the pointer to the Phi node. This node might be deallocated! */
2299 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2302 allocates and returns this node. The routine called to allocate the
2303 node might optimize it away and return a real value, or even a pointer
2304 to a deallocated Phi node on top of the obstack!
2305 This function is called with an in-array of proper size. **/
2307 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2309 ir_node *prevBlock, *res;
2312 /* This loop goes to all predecessor blocks of the block the Phi node is in
2313 and there finds the operands of the Phi node by calling
2314 get_r_value_internal. */
2315 for (i = 1; i <= ins; ++i) {
2316 assert (block->in[i]);
2317 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2319 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2322 /* After collecting all predecessors into the array nin a new Phi node
2323 with these predecessors is created. This constructor contains an
2324 optimization: If all predecessors of the Phi node are identical it
2325 returns the only operand instead of a new Phi node. If the value
2326 passes two different control flow edges without being defined, and
2327 this is the second path treated, a pointer to the node that will be
2328 allocated for the first path (recursion) is returned. We already
2329 know the address of this node, as it is the next node to be allocated
2330 and will be placed on top of the obstack. (The obstack is a _stack_!) */
2331 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
2333 /* Now we now the value for "pos" and can enter it in the array with
2334 all known local variables. Attention: this might be a pointer to
2335 a node, that later will be allocated!!! See new_rd_Phi_in.
2336 If this is called in mature, after some set_value in the same block,
2337 the proper value must not be overwritten:
2339 get_value (makes Phi0, put's it into graph_arr)
2340 set_value (overwrites Phi0 in graph_arr)
2341 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2344 if (!block->attr.block.graph_arr[pos]) {
2345 block->attr.block.graph_arr[pos] = res;
2347 /* printf(" value already computed by %s\n",
2348 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2354 /* This function returns the last definition of a variable. In case
2355 this variable was last defined in a previous block, Phi nodes are
2356 inserted. If the part of the firm graph containing the definition
2357 is not yet constructed, a dummy Phi node is returned. */
2359 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2362 /* There are 4 cases to treat.
2364 1. The block is not mature and we visit it the first time. We can not
2365 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2366 predecessors is returned. This node is added to the linked list (field
2367 "link") of the containing block to be completed when this block is
2368 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2371 2. The value is already known in this block, graph_arr[pos] is set and we
2372 visit the block the first time. We can return the value without
2373 creating any new nodes.
2375 3. The block is mature and we visit it the first time. A Phi node needs
2376 to be created (phi_merge). If the Phi is not needed, as all it's
2377 operands are the same value reaching the block through different
2378 paths, it's optimized away and the value itself is returned.
2380 4. The block is mature, and we visit it the second time. Now two
2381 subcases are possible:
2382 * The value was computed completely the last time we were here. This
2383 is the case if there is no loop. We can return the proper value.
2384 * The recursion that visited this node and set the flag did not
2385 return yet. We are computing a value in a loop and need to
2386 break the recursion without knowing the result yet.
2387 @@@ strange case. Straight forward we would create a Phi before
2388 starting the computation of it's predecessors. In this case we will
2389 find a Phi here in any case. The problem is that this implementation
2390 only creates a Phi after computing the predecessors, so that it is
2391 hard to compute self references of this Phi. @@@
2392 There is no simple check for the second subcase. Therefore we check
2393 for a second visit and treat all such cases as the second subcase.
2394 Anyways, the basic situation is the same: we reached a block
2395 on two paths without finding a definition of the value: No Phi
2396 nodes are needed on both paths.
2397 We return this information "Two paths, no Phi needed" by a very tricky
2398 implementation that relies on the fact that an obstack is a stack and
2399 will return a node with the same address on different allocations.
2400 Look also at phi_merge and new_rd_phi_in to understand this.
2401 @@@ Unfortunately this does not work, see testprogram
2402 three_cfpred_example.
2406 /* case 4 -- already visited. */
2407 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2409 /* visited the first time */
2410 set_irn_visited(block, get_irg_visited(current_ir_graph));
2412 /* Get the local valid value */
2413 res = block->attr.block.graph_arr[pos];
2415 /* case 2 -- If the value is actually computed, return it. */
2416 if (res) return res;
2418 if (block->attr.block.matured) { /* case 3 */
2420 /* The Phi has the same amount of ins as the corresponding block. */
2421 int ins = get_irn_arity(block);
2423 NEW_ARR_A (ir_node *, nin, ins);
2425 /* Phi merge collects the predecessors and then creates a node. */
2426 res = phi_merge (block, pos, mode, nin, ins);
2428 } else { /* case 1 */
2429 /* The block is not mature, we don't know how many in's are needed. A Phi
2430 with zero predecessors is created. Such a Phi node is called Phi0
2431 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2432 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2434 The Phi0 has to remember the pos of it's internal value. If the real
2435 Phi is computed, pos is used to update the array with the local
2438 res = new_rd_Phi0 (current_ir_graph, block, mode);
2439 res->attr.phi0_pos = pos;
2440 res->link = block->link;
2444 /* If we get here, the frontend missed a use-before-definition error */
2447 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2448 assert (mode->code >= irm_F && mode->code <= irm_P);
2449 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2450 tarval_mode_null[mode->code]);
2453 /* The local valid value is available now. */
2454 block->attr.block.graph_arr[pos] = res;
2462 it starts the recursion. This causes an Id at the entry of
2463 every block that has no definition of the value! **/
2465 #if USE_EXPLICIT_PHI_IN_STACK
2467 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2468 void free_Phi_in_stack(Phi_in_stack *s) { }
2471 static INLINE ir_node *
2472 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2473 ir_node **in, int ins, ir_node *phi0)
2476 ir_node *res, *known;
2478 /* Allocate a new node on the obstack. The allocation copies the in
2480 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2481 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2483 /* This loop checks whether the Phi has more than one predecessor.
2484 If so, it is a real Phi node and we break the loop. Else the
2485 Phi node merges the same definition on several paths and therefore
2486 is not needed. Don't consider Bad nodes! */
2488 for (i=0; i < ins; ++i)
2492 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2494 /* Optimize self referencing Phis: We can't detect them yet properly, as
2495 they still refer to the Phi0 they will replace. So replace right now. */
2496 if (phi0 && in[i] == phi0) in[i] = res;
2498 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2506 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2509 edges_node_deleted(res, current_ir_graph);
2510 obstack_free (current_ir_graph->obst, res);
2511 if (is_Phi(known)) {
2512 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2513 order, an enclosing Phi know may get superfluous. */
2514 res = optimize_in_place_2(known);
2516 exchange(known, res);
2522 /* A undefined value, e.g., in unreachable code. */
2526 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2527 IRN_VRFY_IRG(res, irg);
2528 /* Memory Phis in endless loops must be kept alive.
2529 As we can't distinguish these easily we keep all of them alive. */
2530 if ((res->op == op_Phi) && (mode == mode_M))
2531 add_End_keepalive(irg->end, res);
2538 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2540 #if PRECISE_EXC_CONTEXT
2542 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2544 /* Construct a new frag_array for node n.
2545 Copy the content from the current graph_arr of the corresponding block:
2546 this is the current state.
2547 Set ProjM(n) as current memory state.
2548 Further the last entry in frag_arr of current block points to n. This
2549 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2551 static INLINE ir_node ** new_frag_arr (ir_node *n)
2556 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2557 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2558 sizeof(ir_node *)*current_ir_graph->n_loc);
2560 /* turn off optimization before allocating Proj nodes, as res isn't
2562 opt = get_opt_optimize(); set_optimize(0);
2563 /* Here we rely on the fact that all frag ops have Memory as first result! */
2564 if (get_irn_op(n) == op_Call)
2565 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2567 assert((pn_Quot_M == pn_DivMod_M) &&
2568 (pn_Quot_M == pn_Div_M) &&
2569 (pn_Quot_M == pn_Mod_M) &&
2570 (pn_Quot_M == pn_Load_M) &&
2571 (pn_Quot_M == pn_Store_M) &&
2572 (pn_Quot_M == pn_Alloc_M) );
2573 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2577 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2582 * returns the frag_arr from a node
2584 static INLINE ir_node **
2585 get_frag_arr (ir_node *n) {
2586 switch (get_irn_opcode(n)) {
2588 return n->attr.call.exc.frag_arr;
2590 return n->attr.a.exc.frag_arr;
2592 return n->attr.load.exc.frag_arr;
2594 return n->attr.store.exc.frag_arr;
2596 return n->attr.except.frag_arr;
2601 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2603 if (!frag_arr[pos]) frag_arr[pos] = val;
2604 if (frag_arr[current_ir_graph->n_loc - 1]) {
2605 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2606 assert(arr != frag_arr && "Endless recursion detected");
2607 set_frag_value(arr, pos, val);
2612 for (i = 0; i < 1000; ++i) {
2613 if (!frag_arr[pos]) {
2614 frag_arr[pos] = val;
2616 if (frag_arr[current_ir_graph->n_loc - 1]) {
2617 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2623 assert(0 && "potential endless recursion");
2628 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2632 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2634 frag_arr = get_frag_arr(cfOp);
2635 res = frag_arr[pos];
2637 if (block->attr.block.graph_arr[pos]) {
2638 /* There was a set_value after the cfOp and no get_value before that
2639 set_value. We must build a Phi node now. */
2640 if (block->attr.block.matured) {
2641 int ins = get_irn_arity(block);
2643 NEW_ARR_A (ir_node *, nin, ins);
2644 res = phi_merge(block, pos, mode, nin, ins);
2646 res = new_rd_Phi0 (current_ir_graph, block, mode);
2647 res->attr.phi0_pos = pos;
2648 res->link = block->link;
2652 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2653 but this should be better: (remove comment if this works) */
2654 /* It's a Phi, we can write this into all graph_arrs with NULL */
2655 set_frag_value(block->attr.block.graph_arr, pos, res);
2657 res = get_r_value_internal(block, pos, mode);
2658 set_frag_value(block->attr.block.graph_arr, pos, res);
2663 #endif /* PRECISE_EXC_CONTEXT */
/* NOTE(review): this listing is elided — some interior lines (braces,
   declarations, #endif's) of phi_merge are not shown here. */
/* phi_merge: core of the on-the-fly SSA construction.  Collects the
   value at variable slot `pos` (mode `mode`) from every control-flow
   predecessor of `block` into `nin` (size `ins`), then builds the
   Phi node — or lets the constructor fold it to a single operand. */
2666 computes the predecessors for the real phi node, and then
2667 allocates and returns this node. The routine called to allocate the
2668 node might optimize it away and return a real value.
2669 This function must be called with an in-array of proper size. **/
2671 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2673 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2676 /* If this block has no value at pos create a Phi0 and remember it
2677 in graph_arr to break recursions.
2678 Else we may not set graph_arr as there a later value is remembered. */
2680 if (!block->attr.block.graph_arr[pos]) {
2681 if (block == get_irg_start_block(current_ir_graph)) {
2682 /* Collapsing to Bad tarvals is no good idea.
2683 So we call a user-supplied routine here that deals with this case as
2684 appropriate for the given language. Sorrily the only help we can give
2685 here is the position.
2687 Even if all variables are defined before use, it can happen that
2688 we get to the start block, if a Cond has been replaced by a tuple
2689 (bad, jmp). In this case we call the function needlessly, eventually
2690 generating an non existent error.
2691 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2694 if (default_initialize_local_variable) {
2695 ir_node *rem = get_cur_block();
2697 set_cur_block(block);
/* The callback sees 0-based variable numbers; slot 0 is the store,
   hence pos - 1. */
2698 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* Fallback when no language callback is installed: an explicit
   "undefined" constant. */
2702 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2703 /* We don't need to care about exception ops in the start block.
2704 There are none by definition. */
2705 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 placeholder so recursive lookups
   through a loop terminate at this block. */
2707 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2708 block->attr.block.graph_arr[pos] = phi0;
2709 #if PRECISE_EXC_CONTEXT
2710 if (get_opt_precise_exc_context()) {
2711 /* Set graph_arr for fragile ops. Also here we should break recursion.
2712 We could choose a cyclic path through an cfop. But the recursion would
2713 break at some point. */
2714 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2720 /* This loop goes to all predecessor blocks of the block the Phi node
2721 is in and there finds the operands of the Phi node by calling
2722 get_r_value_internal. */
/* Block in[] is 1-based (in[0] is unused for blocks), nin[] is 0-based. */
2723 for (i = 1; i <= ins; ++i) {
2724 prevCfOp = skip_Proj(block->in[i]);
2726 if (is_Bad(prevCfOp)) {
2727 /* In case a Cond has been optimized we would get right to the start block
2728 with an invalid definition. */
2729 nin[i-1] = new_Bad();
2732 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2734 if (!is_Bad(prevBlock)) {
2735 #if PRECISE_EXC_CONTEXT
/* A fragile op (may throw) needs the exception-precise value. */
2736 if (get_opt_precise_exc_context() &&
2737 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2738 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2739 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2742 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2744 nin[i-1] = new_Bad();
2748 /* We want to pass the Phi0 node to the constructor: this finds additional
2749 optimization possibilities.
2750 The Phi0 node either is allocated in this function, or it comes from
2751 a former call to get_r_value_internal. In this case we may not yet
2752 exchange phi0, as this is done in mature_immBlock. */
2754 phi0_all = block->attr.block.graph_arr[pos];
/* Only a 0-arity Phi in this very block counts as "our" Phi0;
   anything else must not be handed to the constructor. */
2755 if (!((get_irn_op(phi0_all) == op_Phi) &&
2756 (get_irn_arity(phi0_all) == 0) &&
2757 (get_nodes_block(phi0_all) == block)))
2763 /* After collecting all predecessors into the array nin a new Phi node
2764 with these predecessors is created. This constructor contains an
2765 optimization: If all predecessors of the Phi node are identical it
2766 returns the only operand instead of a new Phi node. */
2767 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2769 /* In case we allocated a Phi0 node at the beginning of this procedure,
2770 we need to exchange this Phi0 with the real Phi. */
2772 exchange(phi0, res);
2773 block->attr.block.graph_arr[pos] = res;
2774 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2775 only an optimization. */
/* NOTE(review): elided listing — some interior lines of this function
   are not shown here. */
2781 /* This function returns the last definition of a variable. In case
2782 this variable was last defined in a previous block, Phi nodes are
2783 inserted. If the part of the firm graph containing the definition
2784 is not yet constructed, a dummy Phi node is returned. */
/* Uses the graph's visited flag to detect re-entry (loops); callers
   must bump the visited counter before the outermost call. */
2786 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2789 /* There are 4 cases to treat.
2791 1. The block is not mature and we visit it the first time. We can not
2792 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2793 predecessors is returned. This node is added to the linked list (field
2794 "link") of the containing block to be completed when this block is
2795 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2798 2. The value is already known in this block, graph_arr[pos] is set and we
2799 visit the block the first time. We can return the value without
2800 creating any new nodes.
2802 3. The block is mature and we visit it the first time. A Phi node needs
2803 to be created (phi_merge). If the Phi is not needed, as all it's
2804 operands are the same value reaching the block through different
2805 paths, it's optimized away and the value itself is returned.
2807 4. The block is mature, and we visit it the second time. Now two
2808 subcases are possible:
2809 * The value was computed completely the last time we were here. This
2810 is the case if there is no loop. We can return the proper value.
2811 * The recursion that visited this node and set the flag did not
2812 return yet. We are computing a value in a loop and need to
2813 break the recursion. This case only happens if we visited
2814 the same block with phi_merge before, which inserted a Phi0.
2815 So we return the Phi0.
2818 /* case 4 -- already visited. */
2819 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2820 /* As phi_merge allocates a Phi0 this value is always defined. Here
2821 is the critical difference of the two algorithms. */
2822 assert(block->attr.block.graph_arr[pos]);
2823 return block->attr.block.graph_arr[pos];
2826 /* visited the first time */
2827 set_irn_visited(block, get_irg_visited(current_ir_graph));
2829 /* Get the local valid value */
2830 res = block->attr.block.graph_arr[pos];
2832 /* case 2 -- If the value is actually computed, return it. */
2833 if (res) { return res; };
2835 if (block->attr.block.matured) { /* case 3 */
2837 /* The Phi has the same amount of ins as the corresponding block. */
2838 int ins = get_irn_arity(block);
/* nin is stack-allocated (obstack alloca macro); no free needed. */
2840 NEW_ARR_A (ir_node *, nin, ins);
2842 /* Phi merge collects the predecessors and then creates a node. */
2843 res = phi_merge (block, pos, mode, nin, ins);
2845 } else { /* case 1 */
2846 /* The block is not mature, we don't know how many in's are needed. A Phi
2847 with zero predecessors is created. Such a Phi node is called Phi0
2848 node. The Phi0 is then added to the list of Phi0 nodes in this block
2849 to be matured by mature_immBlock later.
2850 The Phi0 has to remember the pos of it's internal value. If the real
2851 Phi is computed, pos is used to update the array with the local
2853 res = new_rd_Phi0 (current_ir_graph, block, mode);
2854 res->attr.phi0_pos = pos;
/* Chain the Phi0 into the block's pending-Phi list (block->link). */
2855 res->link = block->link;
2859 /* If we get here, the frontend missed a use-before-definition error */
/* Best-effort recovery: warn and substitute a zero constant so
   construction can continue. */
2862 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2863 assert (mode->code >= irm_F && mode->code <= irm_P);
2864 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2865 get_mode_null(mode));
2868 /* The local valid value is available now. */
2869 block->attr.block.graph_arr[pos] = res;
2874 #endif /* USE_FAST_PHI_CONSTRUCTION */
2876 /* ************************************************************************** */
2879 * Finalize a Block node, when all control flows are known.
2880 * Acceptable parameters are only Block nodes.
/* NOTE(review): elided listing — the declarations of ins, n, next and
   some braces of this function are not shown here. */
2883 mature_immBlock (ir_node *block)
2889 assert (get_irn_opcode(block) == iro_Block);
2890 /* @@@ should be commented in
2891 assert (!get_Block_matured(block) && "Block already matured"); */
/* Maturing twice is a no-op; only act on still-immature blocks. */
2893 if (!get_Block_matured(block)) {
/* in[0] of a block is unused, so the predecessor count is len-1. */
2894 ins = ARR_LEN (block->in)-1;
2895 /* Fix block parameters */
2896 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2898 /* An array for building the Phi nodes. */
2899 NEW_ARR_A (ir_node *, nin, ins);
2901 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each pending Phi0 (linked via ->link) is replaced by a real Phi
   computed with the now-known predecessors. */
2903 for (n = block->link; n; n=next) {
/* Fresh visited counter per Phi0: phi_merge walks the CFG. */
2904 inc_irg_visited(current_ir_graph);
2906 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2909 block->attr.block.matured = 1;
2911 /* Now, as the block is a finished firm node, we can optimize it.
2912 Since other nodes have been allocated since the block was created
2913 we can not free the node on the obstack. Therefore we have to call
2915 Unfortunately the optimization does not change a lot, as all allocated
2916 nodes refer to the unoptimized node.
2917 We can call _2, as global cse has no effect on blocks. */
2918 block = optimize_in_place_2(block);
2919 IRN_VRFY_IRG(block, current_ir_graph);
2924 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2926 return new_bd_Phi(db, current_ir_graph->current_block,
2931 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2933 return new_bd_Const(db, current_ir_graph->start_block,
2938 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2940 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2944 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2946 return new_bd_Const_type(db, current_ir_graph->start_block,
2952 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2954 return new_bd_Id(db, current_ir_graph->current_block,
2959 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2961 return new_bd_Proj(db, current_ir_graph->current_block,
2966 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2969 assert(arg->op == op_Cond);
2970 arg->attr.c.kind = fragmentary;
2971 arg->attr.c.default_proj = max_proj;
2972 res = new_Proj (arg, mode_X, max_proj);
2977 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2979 return new_bd_Conv(db, current_ir_graph->current_block,
2984 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2986 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2990 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2992 return new_bd_Tuple(db, current_ir_graph->current_block,
2997 new_d_Add (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
2999 return new_bd_Add(db, current_ir_graph->current_block,
3004 new_d_Sub (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3006 return new_bd_Sub(db, current_ir_graph->current_block,
3012 new_d_Minus (dbg_info *db, ir_node *op, ir_mode *mode)
3014 return new_bd_Minus(db, current_ir_graph->current_block,
3019 new_d_Mul (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3021 return new_bd_Mul(db, current_ir_graph->current_block,
3026 * allocate the frag array
3028 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
3029 if (get_opt_precise_exc_context()) {
3030 if ((current_ir_graph->phase_state == phase_building) &&
3031 (get_irn_op(res) == op) && /* Could be optimized away. */
3032 !*frag_store) /* Could be a cse where the arr is already set. */ {
3033 *frag_store = new_frag_arr(res);
3040 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3043 res = new_bd_Quot (db, current_ir_graph->current_block,
3045 res->attr.except.pin_state = op_pin_state_pinned;
3046 #if PRECISE_EXC_CONTEXT
3047 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
3054 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3057 res = new_bd_DivMod (db, current_ir_graph->current_block,
3059 res->attr.except.pin_state = op_pin_state_pinned;
3060 #if PRECISE_EXC_CONTEXT
3061 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
3068 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3071 res = new_bd_Div (db, current_ir_graph->current_block,
3073 res->attr.except.pin_state = op_pin_state_pinned;
3074 #if PRECISE_EXC_CONTEXT
3075 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
3082 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
3085 res = new_bd_Mod (db, current_ir_graph->current_block,
3087 res->attr.except.pin_state = op_pin_state_pinned;
3088 #if PRECISE_EXC_CONTEXT
3089 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
3096 new_d_And (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3098 return new_bd_And (db, current_ir_graph->current_block,
3103 new_d_Or (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3105 return new_bd_Or (db, current_ir_graph->current_block,
3110 new_d_Eor (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
3112 return new_bd_Eor (db, current_ir_graph->current_block,
3117 new_d_Not (dbg_info *db, ir_node *op, ir_mode *mode)
3119 return new_bd_Not (db, current_ir_graph->current_block,
3124 new_d_Shl (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3126 return new_bd_Shl (db, current_ir_graph->current_block,
3131 new_d_Shr (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3133 return new_bd_Shr (db, current_ir_graph->current_block,
3138 new_d_Shrs (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3140 return new_bd_Shrs (db, current_ir_graph->current_block,
3145 new_d_Rot (dbg_info *db, ir_node *op, ir_node *k, ir_mode *mode)
3147 return new_bd_Rot (db, current_ir_graph->current_block,
3152 new_d_Abs (dbg_info *db, ir_node *op, ir_mode *mode)
3154 return new_bd_Abs (db, current_ir_graph->current_block,
3159 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
3161 return new_bd_Cmp (db, current_ir_graph->current_block,
3166 new_d_Jmp (dbg_info *db)
3168 return new_bd_Jmp (db, current_ir_graph->current_block);
3172 new_d_IJmp (dbg_info *db, ir_node *tgt)
3174 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
3178 new_d_Cond (dbg_info *db, ir_node *c)
3180 return new_bd_Cond (db, current_ir_graph->current_block, c);
3184 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
3188 res = new_bd_Call (db, current_ir_graph->current_block,
3189 store, callee, arity, in, tp);
3190 #if PRECISE_EXC_CONTEXT
3191 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
3198 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
3200 return new_bd_Return (db, current_ir_graph->current_block,
3205 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
3207 return new_bd_Raise (db, current_ir_graph->current_block,
3212 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
3215 res = new_bd_Load (db, current_ir_graph->current_block,
3217 #if PRECISE_EXC_CONTEXT
3218 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
3225 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
3228 res = new_bd_Store (db, current_ir_graph->current_block,
3230 #if PRECISE_EXC_CONTEXT
3231 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
3238 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
3242 res = new_bd_Alloc (db, current_ir_graph->current_block,
3243 store, size, alloc_type, where);
3244 #if PRECISE_EXC_CONTEXT
3245 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
3252 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
3253 ir_node *size, ir_type *free_type, where_alloc where)
3255 return new_bd_Free (db, current_ir_graph->current_block,
3256 store, ptr, size, free_type, where);
3260 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
3261 /* GL: objptr was called frame before. Frame was a bad choice for the name
3262 as the operand could as well be a pointer to a dynamic object. */
3264 return new_bd_Sel (db, current_ir_graph->current_block,
3265 store, objptr, 0, NULL, ent);
3269 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
3271 return new_bd_Sel (db, current_ir_graph->current_block,
3272 store, objptr, n_index, index, sel);
3276 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
3278 return (new_bd_InstOf (db, current_ir_graph->current_block,
3279 store, objptr, type));
3283 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
3285 return new_bd_SymConst_type (db, current_ir_graph->start_block,
3290 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
3292 return new_bd_SymConst (db, current_ir_graph->start_block,
3297 new_d_Sync (dbg_info *db, int arity, ir_node** in)
3299 return new_bd_Sync (db, current_ir_graph->current_block,
3306 return _new_d_Bad();
3310 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
3312 return new_bd_Confirm (db, current_ir_graph->current_block,
3317 new_d_Unknown (ir_mode *m)
3319 return new_bd_Unknown(m);
3323 new_d_CallBegin (dbg_info *db, ir_node *call)
3326 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
3331 new_d_EndReg (dbg_info *db)
3334 res = new_bd_EndReg(db, current_ir_graph->current_block);
3339 new_d_EndExcept (dbg_info *db)
3342 res = new_bd_EndExcept(db, current_ir_graph->current_block);
3347 new_d_Break (dbg_info *db)
3349 return new_bd_Break (db, current_ir_graph->current_block);
3353 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
3355 return new_bd_Filter (db, current_ir_graph->current_block,
3362 return _new_d_NoMem();
3366 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
3367 ir_node *ir_true, ir_mode *mode) {
3368 return new_bd_Mux (db, current_ir_graph->current_block,
3369 sel, ir_false, ir_true, mode);
3372 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
3373 ir_node *dst, ir_node *src, ir_type *data_type) {
3375 res = new_bd_CopyB(db, current_ir_graph->current_block,
3376 store, dst, src, data_type);
3377 #if PRECISE_EXC_CONTEXT
3378 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
3383 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
3384 ir_node *idx, ir_node *lower, ir_node *upper) {
3386 res = new_bd_Bound(db, current_ir_graph->current_block,
3387 store, idx, lower, upper);
3388 #if PRECISE_EXC_CONTEXT
3389 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
3394 ir_node *new_d_Keep(dbg_info *db, int n, ir_node *in[])
3396 return new_bd_Keep(db, current_ir_graph->current_block, n, in);
3399 /* ********************************************************************* */
3400 /* Comfortable interface with automatic Phi node construction. */
3401 /* (Uses also constructors of ?? interface, except new_Block. */
3402 /* ********************************************************************* */
3404 /* Block construction */
3405 /* immature Block without predecessors */
/* NOTE(review): elided listing — the `ir_node *res;` declaration and
   the closing `return res;`/brace are not shown here. */
/* Creates an immature block: predecessors are added later with
   add_immBlock_pred() and the block is finished by mature_immBlock().
   Side effect: the new block becomes the current block. */
3406 ir_node *new_d_immBlock (dbg_info *db) {
3409 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3410 /* creates a new dynamic in-array as length of in is -1 */
3411 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
3412 current_ir_graph->current_block = res;
3413 res->attr.block.matured = 0;
3414 res->attr.block.dead = 0;
3415 /* res->attr.block.exc = exc_normal; */
3416 /* res->attr.block.handler_entry = 0; */
3417 res->attr.block.irg = current_ir_graph;
/* Backedges cannot be computed until the in-array is final. */
3418 res->attr.block.backedge = NULL;
3419 res->attr.block.in_cg = NULL;
3420 res->attr.block.cg_backedge = NULL;
3421 set_Block_block_visited(res, 0);
3423 /* Create and initialize array for Phi-node construction. */
/* One slot per local variable (n_loc includes slot 0 for the store). */
3424 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
3425 current_ir_graph->n_loc);
3426 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3428 /* Immature block may not be optimized! */
3429 IRN_VRFY_IRG(res, current_ir_graph);
3435 new_immBlock (void) {
3436 return new_d_immBlock(NULL);
3439 /* add an edge to a jmp/control flow node */
/* Appends `jmp` as a new control-flow predecessor of the immature
   `block`.  Must not be called after the block has been matured. */
3441 add_immBlock_pred (ir_node *block, ir_node *jmp)
3443 if (block->attr.block.matured) {
3444 assert(0 && "Error: Block already matured!\n");
3447 assert(jmp != NULL);
/* block->in is a dynamic array; slot 0 is unused for blocks. */
3448 ARR_APP1(ir_node *, block->in, jmp);
3452 /* changing the current block */
3454 set_cur_block (ir_node *target) {
3455 current_ir_graph->current_block = target;
3458 /* ************************ */
3459 /* parameter administration */
3461 /* get a value from the parameter array from the current block by its index */
/* Looks up (or constructs Phi nodes for) the current definition of
   local variable `pos` with mode `mode`, with debug info `db`.
   Slot 0 of graph_arr holds the store, hence the pos + 1 shift. */
3463 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3465 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Fresh visited counter: get_r_value_internal walks the CFG. */
3466 inc_irg_visited(current_ir_graph);
3468 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3470 /* get a value from the parameter array from the current block by its index */
3472 get_value (int pos, ir_mode *mode)
3474 return get_d_value(NULL, pos, mode);
3477 /* set a value at position pos in the parameter array from the current block */
/* Records `value` as the current definition of local variable `pos`
   in the current block.  Slot 0 is reserved for the store. */
3479 set_value (int pos, ir_node *value)
3481 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3482 assert(pos+1 < current_ir_graph->n_loc);
3483 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3486 /* get the current store */
/* NOTE(review): elided listing — the function header line is not shown.
   Returns the current memory state (the mode_M value in slot 0),
   inserting Phi nodes across blocks if needed. */
3490 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3491 /* GL: one could call get_value instead */
3492 inc_irg_visited(current_ir_graph);
3493 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3496 /* set the current store */
3498 set_store (ir_node *store)
3500 /* GL: one could call set_value instead */
3501 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3502 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3506 keep_alive (ir_node *ka) {
3507 add_End_keepalive(current_ir_graph->end, ka);
3510 /* --- Useful access routines --- */
3511 /* Returns the current block of the current graph. To set the current
3512 block use set_cur_block. */
3513 ir_node *get_cur_block(void) {
3514 return get_irg_current_block(current_ir_graph);
3517 /* Returns the frame type of the current graph */
3518 ir_type *get_cur_frame_type(void) {
3519 return get_irg_frame_type(current_ir_graph);
3523 /* ********************************************************************* */
3526 /* call once for each run of the library */
3528 init_cons(uninitialized_local_variable_func_t *func)
3530 default_initialize_local_variable = func;
3533 /* call for each graph */
3535 irg_finalize_cons (ir_graph *irg) {
3536 irg->phase_state = phase_high;
3540 irp_finalize_cons (void) {
3541 int i, n_irgs = get_irp_n_irgs();
3542 for (i = 0; i < n_irgs; i++) {
3543 irg_finalize_cons(get_irp_irg(i));
3545 irp->phase_state = phase_high;\
3551 ir_node *new_Block(int arity, ir_node **in) {
3552 return new_d_Block(NULL, arity, in);
3554 ir_node *new_Start (void) {
3555 return new_d_Start(NULL);
3557 ir_node *new_End (void) {
3558 return new_d_End(NULL);
3560 ir_node *new_Jmp (void) {
3561 return new_d_Jmp(NULL);
3563 ir_node *new_IJmp (ir_node *tgt) {
3564 return new_d_IJmp(NULL, tgt);
3566 ir_node *new_Cond (ir_node *c) {
3567 return new_d_Cond(NULL, c);
3569 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3570 return new_d_Return(NULL, store, arity, in);
3572 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3573 return new_d_Raise(NULL, store, obj);
3575 ir_node *new_Const (ir_mode *mode, tarval *con) {
3576 return new_d_Const(NULL, mode, con);
3579 ir_node *new_Const_long(ir_mode *mode, long value)
3581 return new_d_Const_long(NULL, mode, value);
3584 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3585 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3588 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3589 return new_d_SymConst(NULL, value, kind);
3591 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3592 return new_d_simpleSel(NULL, store, objptr, ent);
3594 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3596 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3598 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3599 return new_d_InstOf (NULL, store, objptr, ent);
3601 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3603 return new_d_Call(NULL, store, callee, arity, in, tp);
3605 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3606 return new_d_Add(NULL, op1, op2, mode);
3608 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3609 return new_d_Sub(NULL, op1, op2, mode);
3611 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3612 return new_d_Minus(NULL, op, mode);
3614 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3615 return new_d_Mul(NULL, op1, op2, mode);
3617 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3618 return new_d_Quot(NULL, memop, op1, op2);
3620 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3621 return new_d_DivMod(NULL, memop, op1, op2);
3623 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3624 return new_d_Div(NULL, memop, op1, op2);
3626 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3627 return new_d_Mod(NULL, memop, op1, op2);
3629 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3630 return new_d_Abs(NULL, op, mode);
3632 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3633 return new_d_And(NULL, op1, op2, mode);
3635 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3636 return new_d_Or(NULL, op1, op2, mode);
3638 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3639 return new_d_Eor(NULL, op1, op2, mode);
3641 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3642 return new_d_Not(NULL, op, mode);
3644 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3645 return new_d_Shl(NULL, op, k, mode);
3647 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3648 return new_d_Shr(NULL, op, k, mode);
3650 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3651 return new_d_Shrs(NULL, op, k, mode);
3653 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3654 return new_d_Rot(NULL, op, k, mode);
3656 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3657 return new_d_Cmp(NULL, op1, op2);
3659 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3660 return new_d_Conv(NULL, op, mode);
3662 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3663 return new_d_Cast(NULL, op, to_tp);
3665 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3666 return new_d_Phi(NULL, arity, in, mode);
3668 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3669 return new_d_Load(NULL, store, addr, mode);
3671 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3672 return new_d_Store(NULL, store, addr, val);
3674 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3675 where_alloc where) {
3676 return new_d_Alloc(NULL, store, size, alloc_type, where);
3678 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3679 ir_type *free_type, where_alloc where) {
3680 return new_d_Free(NULL, store, ptr, size, free_type, where);
3682 ir_node *new_Sync (int arity, ir_node **in) {
3683 return new_d_Sync(NULL, arity, in);
3685 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3686 return new_d_Proj(NULL, arg, mode, proj);
3688 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3689 return new_d_defaultProj(NULL, arg, max_proj);
3691 ir_node *new_Tuple (int arity, ir_node **in) {
3692 return new_d_Tuple(NULL, arity, in);
3694 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3695 return new_d_Id(NULL, val, mode);
3697 ir_node *new_Bad (void) {
3700 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3701 return new_d_Confirm (NULL, val, bound, cmp);
3703 ir_node *new_Unknown(ir_mode *m) {
3704 return new_d_Unknown(m);
3706 ir_node *new_CallBegin (ir_node *callee) {
3707 return new_d_CallBegin(NULL, callee);
3709 ir_node *new_EndReg (void) {
3710 return new_d_EndReg(NULL);
3712 ir_node *new_EndExcept (void) {
3713 return new_d_EndExcept(NULL);
3715 ir_node *new_Break (void) {
3716 return new_d_Break(NULL);
3718 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3719 return new_d_Filter(NULL, arg, mode, proj);
3721 ir_node *new_NoMem (void) {
3722 return new_d_NoMem();
3724 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3725 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3727 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3728 return new_d_CopyB(NULL, store, dst, src, data_type);
3730 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3731 return new_d_Bound(NULL, store, idx, lower, upper);
3733 ir_node *new_Keep(int n, ir_node *in[]) {
3734 return new_d_Keep(NULL, n, in);