3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * language dependant initialization variable
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* -------------------------------------------- */
66 /* privat interfaces, for professional use only */
67 /* -------------------------------------------- */
69 /* Constructs a Block with a fixed number of predecessors.
70 Does not set current_block. Can not be used with automatic
71 Phi node construction. */
73 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
77 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
78 set_Block_matured(res, 1);
79 set_Block_block_visited(res, 0);
81 /* res->attr.block.exc = exc_normal; */
82 /* res->attr.block.handler_entry = 0; */
83 res->attr.block.dead = 0;
84 res->attr.block.irg = irg;
85 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
86 res->attr.block.in_cg = NULL;
87 res->attr.block.cg_backedge = NULL;
89 IRN_VRFY_IRG(res, irg);
94 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
98 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
99 /* res->attr.start.irg = irg; */
101 IRN_VRFY_IRG(res, irg);
106 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
110 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
112 IRN_VRFY_IRG(res, irg);
116 /* Creates a Phi node with all predecessors. Calling this constructor
117 is only allowed if the corresponding block is mature. */
119 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
123 bool has_unknown = false;
125 /* Don't assert that block matured: the use of this constructor is strongly
127 if ( get_Block_matured(block) )
128 assert( get_irn_arity(block) == arity );
130 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
132 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
134 for (i = arity-1; i >= 0; i--)
135 if (get_irn_op(in[i]) == op_Unknown) {
140 if (!has_unknown) res = optimize_node (res);
141 IRN_VRFY_IRG(res, irg);
143 /* Memory Phis in endless loops must be kept alive.
144 As we can't distinguish these easily we keep all of them alive. */
145 if ((res->op == op_Phi) && (mode == mode_M))
146 add_End_keepalive(irg->end, res);
151 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
155 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
156 res->attr.con.tv = con;
157 set_Const_type(res, tp); /* Call method because of complex assertion. */
158 res = optimize_node (res);
159 assert(get_Const_type(res) == tp);
160 IRN_VRFY_IRG(res, irg);
166 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
168 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
172 new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
174 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
178 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
182 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
183 res = optimize_node(res);
184 IRN_VRFY_IRG(res, irg);
189 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
194 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
195 res->attr.proj = proj;
198 assert(get_Proj_pred(res));
199 assert(get_nodes_block(get_Proj_pred(res)));
201 res = optimize_node(res);
203 IRN_VRFY_IRG(res, irg);
209 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
213 assert(arg->op == op_Cond);
214 arg->attr.c.kind = fragmentary;
215 arg->attr.c.default_proj = max_proj;
216 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
221 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
225 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
226 res = optimize_node(res);
227 IRN_VRFY_IRG(res, irg);
232 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
236 assert(is_atomic_type(to_tp));
238 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
239 res->attr.cast.totype = to_tp;
240 res = optimize_node(res);
241 IRN_VRFY_IRG(res, irg);
246 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
250 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
251 res = optimize_node (res);
252 IRN_VRFY_IRG(res, irg);
257 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
258 ir_node *op1, ir_node *op2, ir_mode *mode)
265 res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
266 res = optimize_node(res);
267 IRN_VRFY_IRG(res, irg);
272 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op1, ir_node *op2, ir_mode *mode)
280 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
281 res = optimize_node (res);
282 IRN_VRFY_IRG(res, irg);
287 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
288 ir_node *op, ir_mode *mode)
292 res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
293 res = optimize_node(res);
294 IRN_VRFY_IRG(res, irg);
299 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
300 ir_node *op1, ir_node *op2, ir_mode *mode)
307 res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
308 res = optimize_node(res);
309 IRN_VRFY_IRG(res, irg);
314 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
315 ir_node *memop, ir_node *op1, ir_node *op2)
323 res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
324 res = optimize_node(res);
325 IRN_VRFY_IRG(res, irg);
330 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
331 ir_node *memop, ir_node *op1, ir_node *op2)
339 res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
340 res = optimize_node(res);
341 IRN_VRFY_IRG(res, irg);
346 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *memop, ir_node *op1, ir_node *op2)
355 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
356 res = optimize_node(res);
357 IRN_VRFY_IRG(res, irg);
362 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
363 ir_node *memop, ir_node *op1, ir_node *op2)
371 res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
372 res = optimize_node(res);
373 IRN_VRFY_IRG(res, irg);
378 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
379 ir_node *op1, ir_node *op2, ir_mode *mode)
386 res = new_ir_node(db, irg, block, op_And, mode, 2, in);
387 res = optimize_node(res);
388 IRN_VRFY_IRG(res, irg);
393 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op1, ir_node *op2, ir_mode *mode)
401 res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
402 res = optimize_node(res);
403 IRN_VRFY_IRG(res, irg);
408 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
409 ir_node *op1, ir_node *op2, ir_mode *mode)
416 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
417 res = optimize_node (res);
418 IRN_VRFY_IRG(res, irg);
423 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
424 ir_node *op, ir_mode *mode)
428 res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
429 res = optimize_node(res);
430 IRN_VRFY_IRG(res, irg);
435 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
436 ir_node *op, ir_node *k, ir_mode *mode)
443 res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
444 res = optimize_node(res);
445 IRN_VRFY_IRG(res, irg);
450 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
451 ir_node *op, ir_node *k, ir_mode *mode)
458 res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
459 res = optimize_node(res);
460 IRN_VRFY_IRG(res, irg);
465 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
466 ir_node *op, ir_node *k, ir_mode *mode)
473 res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
474 res = optimize_node(res);
475 IRN_VRFY_IRG(res, irg);
480 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
481 ir_node *op, ir_node *k, ir_mode *mode)
488 res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
495 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
496 ir_node *op, ir_mode *mode)
500 res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
501 res = optimize_node (res);
502 IRN_VRFY_IRG(res, irg);
507 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
508 ir_node *op1, ir_node *op2)
515 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
516 res = optimize_node(res);
517 IRN_VRFY_IRG(res, irg);
522 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
526 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
527 res = optimize_node (res);
528 IRN_VRFY_IRG (res, irg);
533 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
537 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
538 res->attr.c.kind = dense;
539 res->attr.c.default_proj = 0;
540 res = optimize_node (res);
541 IRN_VRFY_IRG(res, irg);
546 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
547 ir_node *callee, int arity, ir_node **in, type *tp)
554 NEW_ARR_A(ir_node *, r_in, r_arity);
557 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
559 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
561 assert((get_unknown_type() == tp) || is_Method_type(tp));
562 set_Call_type(res, tp);
563 res->attr.call.exc.pin_state = op_pin_state_pinned;
564 res->attr.call.callee_arr = NULL;
565 res = optimize_node(res);
566 IRN_VRFY_IRG(res, irg);
571 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
572 ir_node *store, int arity, ir_node **in)
579 NEW_ARR_A (ir_node *, r_in, r_arity);
581 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
582 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
583 res = optimize_node(res);
584 IRN_VRFY_IRG(res, irg);
589 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
596 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
597 res = optimize_node(res);
598 IRN_VRFY_IRG(res, irg);
603 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
604 ir_node *store, ir_node *adr, ir_mode *mode)
611 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
612 res->attr.load.exc.pin_state = op_pin_state_pinned;
613 res->attr.load.load_mode = mode;
614 res->attr.load.volatility = volatility_non_volatile;
615 res = optimize_node(res);
616 IRN_VRFY_IRG(res, irg);
621 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
622 ir_node *store, ir_node *adr, ir_node *val)
630 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
631 res->attr.store.exc.pin_state = op_pin_state_pinned;
632 res->attr.store.volatility = volatility_non_volatile;
633 res = optimize_node(res);
634 IRN_VRFY_IRG(res, irg);
639 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
640 ir_node *size, type *alloc_type, where_alloc where)
647 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
648 res->attr.a.exc.pin_state = op_pin_state_pinned;
649 res->attr.a.where = where;
650 res->attr.a.type = alloc_type;
651 res = optimize_node(res);
652 IRN_VRFY_IRG(res, irg);
657 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
658 ir_node *ptr, ir_node *size, type *free_type, where_alloc where)
666 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
667 res->attr.f.where = where;
668 res->attr.f.type = free_type;
669 res = optimize_node(res);
670 IRN_VRFY_IRG(res, irg);
675 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
676 int arity, ir_node **in, entity *ent)
682 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
685 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
688 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
689 res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
690 res->attr.s.ent = ent;
691 res = optimize_node(res);
692 IRN_VRFY_IRG(res, irg);
697 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
698 ir_node *objptr, type *ent)
705 NEW_ARR_A(ir_node *, r_in, r_arity);
709 res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
710 res->attr.io.ent = ent;
712 /* res = optimize(res); */
713 IRN_VRFY_IRG(res, irg);
718 new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
719 symconst_kind symkind, type *tp) {
723 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
728 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
730 res->attr.i.num = symkind;
731 res->attr.i.sym = value;
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
740 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
741 symconst_kind symkind)
743 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
747 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
748 symconst_symbol sym = {(type *)symbol};
749 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
752 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
753 symconst_symbol sym = {(type *)symbol};
754 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
757 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
758 symconst_symbol sym = {symbol};
759 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
762 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
763 symconst_symbol sym = {symbol};
764 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
768 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
772 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
773 res = optimize_node(res);
774 IRN_VRFY_IRG(res, irg);
779 new_rd_Bad (ir_graph *irg)
785 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
787 ir_node *in[2], *res;
791 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
792 res->attr.confirm_cmp = cmp;
793 res = optimize_node (res);
794 IRN_VRFY_IRG(res, irg);
799 new_rd_Unknown (ir_graph *irg, ir_mode *m)
801 return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
805 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
810 in[0] = get_Call_ptr(call);
811 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
812 /* res->attr.callbegin.irg = irg; */
813 res->attr.callbegin.call = call;
814 res = optimize_node(res);
815 IRN_VRFY_IRG(res, irg);
820 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
824 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
826 IRN_VRFY_IRG(res, irg);
831 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
835 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
836 irg->end_except = res;
837 IRN_VRFY_IRG (res, irg);
842 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
846 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
847 res = optimize_node(res);
848 IRN_VRFY_IRG(res, irg);
853 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
858 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
859 res->attr.filter.proj = proj;
860 res->attr.filter.in_cg = NULL;
861 res->attr.filter.backedge = NULL;
864 assert(get_Proj_pred(res));
865 assert(get_nodes_block(get_Proj_pred(res)));
867 res = optimize_node(res);
868 IRN_VRFY_IRG(res, irg);
873 new_rd_NoMem (ir_graph *irg) {
878 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
879 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
888 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
891 res = optimize_node(res);
892 IRN_VRFY_IRG(res, irg);
897 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
898 return new_rd_Block(NULL, irg, arity, in);
900 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
901 return new_rd_Start(NULL, irg, block);
903 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
904 return new_rd_End(NULL, irg, block);
906 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
907 return new_rd_Jmp(NULL, irg, block);
909 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
910 return new_rd_Cond(NULL, irg, block, c);
912 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
913 ir_node *store, int arity, ir_node **in) {
914 return new_rd_Return(NULL, irg, block, store, arity, in);
916 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
917 ir_node *store, ir_node *obj) {
918 return new_rd_Raise(NULL, irg, block, store, obj);
920 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
921 ir_mode *mode, tarval *con) {
922 return new_rd_Const(NULL, irg, block, mode, con);
925 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
926 ir_mode *mode, long value) {
927 return new_rd_Const_long(NULL, irg, block, mode, value);
931 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
932 symconst_symbol value, symconst_kind symkind) {
933 return new_rd_SymConst(NULL, irg, block, value, symkind);
935 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
936 ir_node *objptr, int n_index, ir_node **index,
938 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
940 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
942 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
944 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
945 ir_node *callee, int arity, ir_node **in,
947 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
949 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
950 ir_node *op1, ir_node *op2, ir_mode *mode) {
951 return new_rd_Add(NULL, irg, block, op1, op2, mode);
953 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
954 ir_node *op1, ir_node *op2, ir_mode *mode) {
955 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
957 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
958 ir_node *op, ir_mode *mode) {
959 return new_rd_Minus(NULL, irg, block, op, mode);
961 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
962 ir_node *op1, ir_node *op2, ir_mode *mode) {
963 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
965 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
966 ir_node *memop, ir_node *op1, ir_node *op2) {
967 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
969 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
970 ir_node *memop, ir_node *op1, ir_node *op2) {
971 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
973 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
974 ir_node *memop, ir_node *op1, ir_node *op2) {
975 return new_rd_Div(NULL, irg, block, memop, op1, op2);
977 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
978 ir_node *memop, ir_node *op1, ir_node *op2) {
979 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
981 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
982 ir_node *op, ir_mode *mode) {
983 return new_rd_Abs(NULL, irg, block, op, mode);
985 ir_node *new_r_And (ir_graph *irg, ir_node *block,
986 ir_node *op1, ir_node *op2, ir_mode *mode) {
987 return new_rd_And(NULL, irg, block, op1, op2, mode);
989 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
990 ir_node *op1, ir_node *op2, ir_mode *mode) {
991 return new_rd_Or(NULL, irg, block, op1, op2, mode);
993 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
994 ir_node *op1, ir_node *op2, ir_mode *mode) {
995 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
997 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
998 ir_node *op, ir_mode *mode) {
999 return new_rd_Not(NULL, irg, block, op, mode);
1001 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1002 ir_node *op1, ir_node *op2) {
1003 return new_rd_Cmp(NULL, irg, block, op1, op2);
1005 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1006 ir_node *op, ir_node *k, ir_mode *mode) {
1007 return new_rd_Shl(NULL, irg, block, op, k, mode);
1009 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1010 ir_node *op, ir_node *k, ir_mode *mode) {
1011 return new_rd_Shr(NULL, irg, block, op, k, mode);
1013 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1014 ir_node *op, ir_node *k, ir_mode *mode) {
1015 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1017 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1018 ir_node *op, ir_node *k, ir_mode *mode) {
1019 return new_rd_Rot(NULL, irg, block, op, k, mode);
1021 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1022 ir_node *op, ir_mode *mode) {
1023 return new_rd_Conv(NULL, irg, block, op, mode);
1025 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
1026 return new_rd_Cast(NULL, irg, block, op, to_tp);
1028 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1029 ir_node **in, ir_mode *mode) {
1030 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1032 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1033 ir_node *store, ir_node *adr, ir_mode *mode) {
1034 return new_rd_Load(NULL, irg, block, store, adr, mode);
1036 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1037 ir_node *store, ir_node *adr, ir_node *val) {
1038 return new_rd_Store(NULL, irg, block, store, adr, val);
1040 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1041 ir_node *size, type *alloc_type, where_alloc where) {
1042 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1044 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1045 ir_node *ptr, ir_node *size, type *free_type, where_alloc where) {
1046 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1048 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1049 return new_rd_Sync(NULL, irg, block, arity, in);
1051 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1052 ir_mode *mode, long proj) {
1053 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1055 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1057 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1059 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1060 int arity, ir_node **in) {
1061 return new_rd_Tuple(NULL, irg, block, arity, in );
1063 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1064 ir_node *val, ir_mode *mode) {
1065 return new_rd_Id(NULL, irg, block, val, mode);
1067 ir_node *new_r_Bad (ir_graph *irg) {
1068 return new_rd_Bad(irg);
1070 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1071 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1073 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1074 return new_rd_Unknown(irg, m);
1076 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1077 return new_rd_CallBegin(NULL, irg, block, callee);
1079 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1080 return new_rd_EndReg(NULL, irg, block);
1082 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1083 return new_rd_EndExcept(NULL, irg, block);
1085 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1086 return new_rd_Break(NULL, irg, block);
1088 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1089 ir_mode *mode, long proj) {
1090 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1092 ir_node *new_r_NoMem (ir_graph *irg) {
1093 return new_rd_NoMem(irg);
1095 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1096 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1097 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1101 /** ********************/
1102 /** public interfaces */
1103 /** construction tools */
1107 * - create a new Start node in the current block
1109 * @return s - pointer to the created Start node
1114 new_d_Start (dbg_info* db)
1118 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1119 op_Start, mode_T, 0, NULL);
1120 /* res->attr.start.irg = current_ir_graph; */
1122 res = optimize_node(res);
1123 IRN_VRFY_IRG(res, current_ir_graph);
1128 new_d_End (dbg_info* db)
1131 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1132 op_End, mode_X, -1, NULL);
1133 res = optimize_node(res);
1134 IRN_VRFY_IRG(res, current_ir_graph);
1139 /* Constructs a Block with a fixed number of predecessors.
1140 Does set current_block. Can be used with automatic Phi
1141 node construction. */
1143 new_d_Block (dbg_info* db, int arity, ir_node **in)
1147 bool has_unknown = false;
1149 res = new_rd_Block(db, current_ir_graph, arity, in);
1151 /* Create and initialize array for Phi-node construction. */
1152 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1153 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1154 current_ir_graph->n_loc);
1155 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1158 for (i = arity-1; i >= 0; i--)
1159 if (get_irn_op(in[i]) == op_Unknown) {
1164 if (!has_unknown) res = optimize_node(res);
1165 current_ir_graph->current_block = res;
1167 IRN_VRFY_IRG(res, current_ir_graph);
1172 /* ***********************************************************************/
1173 /* Methods necessary for automatic Phi node creation */
1175 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1176 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1177 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1178 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1180 Call Graph: ( A ---> B == A "calls" B)
1182 get_value mature_immBlock
1190 get_r_value_internal |
1194 new_rd_Phi0 new_rd_Phi_in
1196 * *************************************************************************** */
1198 /** Creates a Phi node with 0 predecessors */
1199 static INLINE ir_node *
1200 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1204 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1205 IRN_VRFY_IRG(res, irg);
1209 /* There are two implementations of the Phi node construction. The first
1210 is faster, but does not work for blocks with more than 2 predecessors.
1211 The second works always but is slower and causes more unnecessary Phi
1213 Select the implementations by the following preprocessor flag set in
1215 #if USE_FAST_PHI_CONSTRUCTION
1217 /* This is a stack used for allocating and deallocating nodes in
1218 new_rd_Phi_in. The original implementation used the obstack
1219 to model this stack, now it is explicit. This reduces side effects.
1221 #if USE_EXPLICIT_PHI_IN_STACK
1223 new_Phi_in_stack(void) {
1226 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1228 res->stack = NEW_ARR_F (ir_node *, 0);
1235 free_Phi_in_stack(Phi_in_stack *s) {
1236 DEL_ARR_F(s->stack);
1240 free_to_Phi_in_stack(ir_node *phi) {
1241 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1242 current_ir_graph->Phi_in_stack->pos)
1243 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1245 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1247 (current_ir_graph->Phi_in_stack->pos)++;
1250 static INLINE ir_node *
1251 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1252 int arity, ir_node **in) {
1254 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1255 int pos = current_ir_graph->Phi_in_stack->pos;
1259 /* We need to allocate a new node */
1260 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1261 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1263 /* reuse the old node and initialize it again. */
1266 assert (res->kind == k_ir_node);
1267 assert (res->op == op_Phi);
1271 assert (arity >= 0);
1272 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1273 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1275 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1277 (current_ir_graph->Phi_in_stack->pos)--;
1281 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1283 /* Creates a Phi node with a given, fixed array **in of predecessors.
1284 If the Phi node is unnecessary, as the same value reaches the block
1285 through all control flow paths, it is eliminated and the value
1286 returned directly. This constructor is only intended for use in
1287 the automatic Phi node generation triggered by get_value or mature.
1288 The implementation is quite tricky and depends on the fact, that
1289 the nodes are allocated on a stack:
1290 The in array contains predecessors and NULLs. The NULLs appear,
1291 if get_r_value_internal, that computed the predecessors, reached
1292 the same block on two paths. In this case the same value reaches
1293 this block on both paths, there is no definition in between. We need
1294 not allocate a Phi where these path's merge, but we have to communicate
1295 this fact to the caller. This happens by returning a pointer to the
1296 node the caller _will_ allocate. (Yes, we predict the address. We can
1297 do so because the nodes are allocated on the obstack.) The caller then
1298 finds a pointer to itself and, when this routine is called again,
1301 static INLINE ir_node *
1302 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1305 ir_node *res, *known;
1307 /* Allocate a new node on the obstack. This can return a node to
1308 which some of the pointers in the in-array already point.
1309 Attention: the constructor copies the in array, i.e., the later
1310 changes to the array in this routine do not affect the
1311 constructed node! If the in array contains NULLs, there will be
1312 missing predecessors in the returned node. Is this a possible
1313 internal state of the Phi node generation? */
1314 #if USE_EXPLICIT_PHI_IN_STACK
1315 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1317 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1318 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1321 /* The in-array can contain NULLs. These were returned by
1322 get_r_value_internal if it reached the same block/definition on a
1323 second path. The NULLs are replaced by the node itself to
1324 simplify the test in the next loop. */
1325 for (i = 0; i < ins; ++i) {
1330 /* This loop checks whether the Phi has more than one predecessor.
1331 If so, it is a real Phi node and we break the loop. Else the Phi
1332 node merges the same definition on several paths and therefore is
1334 for (i = 0; i < ins; ++i) {
1335 if (in[i] == res || in[i] == known)
1344 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1346 #if USE_EXPLICIT_PHI_IN_STACK
1347 free_to_Phi_in_stack(res);
1349 edges_node_deleted(res, current_ir_graph);
1350 obstack_free(current_ir_graph->obst, res);
1354 res = optimize_node (res);
1355 IRN_VRFY_IRG(res, irg);
1358 /* return the pointer to the Phi node. This node might be deallocated! */
1363 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1366 allocates and returns this node. The routine called to allocate the
1367 node might optimize it away and return a real value, or even a pointer
1368 to a deallocated Phi node on top of the obstack!
1369 This function is called with an in-array of proper size. **/
1371 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1373 ir_node *prevBlock, *res;
1376 /* This loop goes to all predecessor blocks of the block the Phi node is in
1377 and there finds the operands of the Phi node by calling
1378 get_r_value_internal. */
1379 for (i = 1; i <= ins; ++i) {
1380 assert (block->in[i]);
1381 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1383 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1386 /* After collecting all predecessors into the array nin a new Phi node
1387 with these predecessors is created. This constructor contains an
1388 optimization: If all predecessors of the Phi node are identical it
1389 returns the only operand instead of a new Phi node. If the value
1390 passes two different control flow edges without being defined, and
1391 this is the second path treated, a pointer to the node that will be
1392 allocated for the first path (recursion) is returned. We already
1393 know the address of this node, as it is the next node to be allocated
1394 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1395 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1397 /* Now we now the value for "pos" and can enter it in the array with
1398 all known local variables. Attention: this might be a pointer to
1399 a node, that later will be allocated!!! See new_rd_Phi_in.
1400 If this is called in mature, after some set_value in the same block,
1401 the proper value must not be overwritten:
1403 get_value (makes Phi0, put's it into graph_arr)
1404 set_value (overwrites Phi0 in graph_arr)
1405 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1408 if (!block->attr.block.graph_arr[pos]) {
1409 block->attr.block.graph_arr[pos] = res;
1411 /* printf(" value already computed by %s\n",
1412 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1418 /* This function returns the last definition of a variable. In case
1419 this variable was last defined in a previous block, Phi nodes are
1420 inserted. If the part of the firm graph containing the definition
1421 is not yet constructed, a dummy Phi node is returned. */
1423 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1426 /* There are 4 cases to treat.
1428 1. The block is not mature and we visit it the first time. We can not
1429 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1430 predecessors is returned. This node is added to the linked list (field
1431 "link") of the containing block to be completed when this block is
1432 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1435 2. The value is already known in this block, graph_arr[pos] is set and we
1436 visit the block the first time. We can return the value without
1437 creating any new nodes.
1439 3. The block is mature and we visit it the first time. A Phi node needs
1440 to be created (phi_merge). If the Phi is not needed, as all it's
1441 operands are the same value reaching the block through different
1442 paths, it's optimized away and the value itself is returned.
1444 4. The block is mature, and we visit it the second time. Now two
1445 subcases are possible:
1446 * The value was computed completely the last time we were here. This
1447 is the case if there is no loop. We can return the proper value.
1448 * The recursion that visited this node and set the flag did not
1449 return yet. We are computing a value in a loop and need to
1450 break the recursion without knowing the result yet.
1451 @@@ strange case. Straight forward we would create a Phi before
1452 starting the computation of it's predecessors. In this case we will
1453 find a Phi here in any case. The problem is that this implementation
1454 only creates a Phi after computing the predecessors, so that it is
1455 hard to compute self references of this Phi. @@@
1456 There is no simple check for the second subcase. Therefore we check
1457 for a second visit and treat all such cases as the second subcase.
1458 Anyways, the basic situation is the same: we reached a block
1459 on two paths without finding a definition of the value: No Phi
1460 nodes are needed on both paths.
1461 We return this information "Two paths, no Phi needed" by a very tricky
1462 implementation that relies on the fact that an obstack is a stack and
1463 will return a node with the same address on different allocations.
1464 Look also at phi_merge and new_rd_phi_in to understand this.
1465 @@@ Unfortunately this does not work, see testprogram
1466 three_cfpred_example.
1470 /* case 4 -- already visited. */
1471 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1473 /* visited the first time */
1474 set_irn_visited(block, get_irg_visited(current_ir_graph));
1476 /* Get the local valid value */
1477 res = block->attr.block.graph_arr[pos];
1479 /* case 2 -- If the value is actually computed, return it. */
1480 if (res) return res;
1482 if (block->attr.block.matured) { /* case 3 */
1484 /* The Phi has the same amount of ins as the corresponding block. */
1485 int ins = get_irn_arity(block);
1487 NEW_ARR_A (ir_node *, nin, ins);
1489 /* Phi merge collects the predecessors and then creates a node. */
1490 res = phi_merge (block, pos, mode, nin, ins);
1492 } else { /* case 1 */
1493 /* The block is not mature, we don't know how many in's are needed. A Phi
1494 with zero predecessors is created. Such a Phi node is called Phi0
1495 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1496 to the list of Phi0 nodes in this block to be matured by mature_immBlock
1498 The Phi0 has to remember the pos of it's internal value. If the real
1499 Phi is computed, pos is used to update the array with the local
1502 res = new_rd_Phi0 (current_ir_graph, block, mode);
1503 res->attr.phi0_pos = pos;
1504 res->link = block->link;
1508 /* If we get here, the frontend missed a use-before-definition error */
1511 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1512 assert (mode->code >= irm_F && mode->code <= irm_P);
1513 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1514 tarval_mode_null[mode->code]);
1517 /* The local valid value is available now. */
1518 block->attr.block.graph_arr[pos] = res;
1526 it starts the recursion. This causes an Id at the entry of
1527 every block that has no definition of the value! **/
1529 #if USE_EXPLICIT_PHI_IN_STACK
1531 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1532 void free_Phi_in_stack(Phi_in_stack *s) { }
1535 static INLINE ir_node *
1536 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1537 ir_node **in, int ins, ir_node *phi0)
1540 ir_node *res, *known;
1542 /* Allocate a new node on the obstack. The allocation copies the in
1544 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1545 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1547 /* This loop checks whether the Phi has more than one predecessor.
1548 If so, it is a real Phi node and we break the loop. Else the
1549 Phi node merges the same definition on several paths and therefore
1550 is not needed. Don't consider Bad nodes! */
1552 for (i=0; i < ins; ++i)
1556 in[i] = skip_Id(in[i]); /* increasses the number of freed Phis. */
1558 /* Optimize self referencing Phis: We can't detect them yet properly, as
1559 they still refer to the Phi0 they will replace. So replace right now. */
1560 if (phi0 && in[i] == phi0) in[i] = res;
1562 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1570 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1573 edges_node_deleted(res, current_ir_graph);
1574 obstack_free (current_ir_graph->obst, res);
1575 if (is_Phi(known)) {
1576 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1577 order, an enclosing Phi know may get superfluous. */
1578 res = optimize_in_place_2(known);
1580 exchange(known, res);
1586 /* A undefined value, e.g., in unreachable code. */
1590 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1591 IRN_VRFY_IRG(res, irg);
1592 /* Memory Phis in endless loops must be kept alive.
1593 As we can't distinguish these easily we keep all of them alive. */
1594 if ((res->op == op_Phi) && (mode == mode_M))
1595 add_End_keepalive(irg->end, res);
1602 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1604 #if PRECISE_EXC_CONTEXT
1606 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1608 /* Construct a new frag_array for node n.
1609 Copy the content from the current graph_arr of the corresponding block:
1610 this is the current state.
1611 Set ProjM(n) as current memory state.
1612 Further the last entry in frag_arr of current block points to n. This
1613 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1615 static INLINE ir_node ** new_frag_arr (ir_node *n)
1620 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1621 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1622 sizeof(ir_node *)*current_ir_graph->n_loc);
1624 /* turn off optimization before allocating Proj nodes, as res isn't
1626 opt = get_opt_optimize(); set_optimize(0);
1627 /* Here we rely on the fact that all frag ops have Memory as first result! */
1628 if (get_irn_op(n) == op_Call)
1629 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1631 assert((pn_Quot_M == pn_DivMod_M) &&
1632 (pn_Quot_M == pn_Div_M) &&
1633 (pn_Quot_M == pn_Mod_M) &&
1634 (pn_Quot_M == pn_Load_M) &&
1635 (pn_Quot_M == pn_Store_M) &&
1636 (pn_Quot_M == pn_Alloc_M) );
1637 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1641 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1646 * returns the frag_arr from a node
1648 static INLINE ir_node **
1649 get_frag_arr (ir_node *n) {
1650 switch (get_irn_opcode(n)) {
1652 return n->attr.call.exc.frag_arr;
1654 return n->attr.a.exc.frag_arr;
1656 return n->attr.load.exc.frag_arr;
1658 return n->attr.store.exc.frag_arr;
1660 return n->attr.except.frag_arr;
1665 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1667 if (!frag_arr[pos]) frag_arr[pos] = val;
1668 if (frag_arr[current_ir_graph->n_loc - 1]) {
1669 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1670 assert(arr != frag_arr && "Endless recursion detected");
1671 set_frag_value(arr, pos, val);
1676 for (i = 0; i < 1000; ++i) {
1677 if (!frag_arr[pos]) {
1678 frag_arr[pos] = val;
1680 if (frag_arr[current_ir_graph->n_loc - 1]) {
1681 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1687 assert(0 && "potential endless recursion");
1692 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1696 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1698 frag_arr = get_frag_arr(cfOp);
1699 res = frag_arr[pos];
1701 if (block->attr.block.graph_arr[pos]) {
1702 /* There was a set_value after the cfOp and no get_value before that
1703 set_value. We must build a Phi node now. */
1704 if (block->attr.block.matured) {
1705 int ins = get_irn_arity(block);
1707 NEW_ARR_A (ir_node *, nin, ins);
1708 res = phi_merge(block, pos, mode, nin, ins);
1710 res = new_rd_Phi0 (current_ir_graph, block, mode);
1711 res->attr.phi0_pos = pos;
1712 res->link = block->link;
1716 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1717 but this should be better: (remove comment if this works) */
1718 /* It's a Phi, we can write this into all graph_arrs with NULL */
1719 set_frag_value(block->attr.block.graph_arr, pos, res);
1721 res = get_r_value_internal(block, pos, mode);
1722 set_frag_value(block->attr.block.graph_arr, pos, res);
1730 computes the predecessors for the real phi node, and then
1731 allocates and returns this node. The routine called to allocate the
1732 node might optimize it away and return a real value.
1733 This function must be called with an in-array of proper size. **/
1735 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1737 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
1740 /* If this block has no value at pos create a Phi0 and remember it
1741 in graph_arr to break recursions.
1742 Else we may not set graph_arr as there a later value is remembered. */
1744 if (!block->attr.block.graph_arr[pos]) {
1745 if (block == get_irg_start_block(current_ir_graph)) {
1746 /* Collapsing to Bad tarvals is no good idea.
1747 So we call a user-supplied routine here that deals with this case as
1748 appropriate for the given language. Sorryly the only help we can give
1749 here is the position.
1751 Even if all variables are defined before use, it can happen that
1752 we get to the start block, if a cond has been replaced by a tuple
1753 (bad, jmp). In this case we call the function needlessly, eventually
1754 generating an non existant error.
1755 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1758 if (default_initialize_local_variable)
1759 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
1761 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1762 /* We don't need to care about exception ops in the start block.
1763 There are none by definition. */
1764 return block->attr.block.graph_arr[pos];
1766 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1767 block->attr.block.graph_arr[pos] = phi0;
1768 #if PRECISE_EXC_CONTEXT
1769 if (get_opt_precise_exc_context()) {
1770 /* Set graph_arr for fragile ops. Also here we should break recursion.
1771 We could choose a cyclic path through an cfop. But the recursion would
1772 break at some point. */
1773 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1779 /* This loop goes to all predecessor blocks of the block the Phi node
1780 is in and there finds the operands of the Phi node by calling
1781 get_r_value_internal. */
1782 for (i = 1; i <= ins; ++i) {
1783 prevCfOp = skip_Proj(block->in[i]);
1785 if (is_Bad(prevCfOp)) {
1786 /* In case a Cond has been optimized we would get right to the start block
1787 with an invalid definition. */
1788 nin[i-1] = new_Bad();
1791 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1793 if (!is_Bad(prevBlock)) {
1794 #if PRECISE_EXC_CONTEXT
1795 if (get_opt_precise_exc_context() &&
1796 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1797 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1798 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1801 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1803 nin[i-1] = new_Bad();
1807 /* We want to pass the Phi0 node to the constructor: this finds additional
1808 optimization possibilities.
1809 The Phi0 node either is allocated in this function, or it comes from
1810 a former call to get_r_value_internal. In this case we may not yet
1811 exchange phi0, as this is done in mature_immBlock. */
1813 phi0_all = block->attr.block.graph_arr[pos];
1814 if (!((get_irn_op(phi0_all) == op_Phi) &&
1815 (get_irn_arity(phi0_all) == 0) &&
1816 (get_nodes_block(phi0_all) == block)))
1822 /* After collecting all predecessors into the array nin a new Phi node
1823 with these predecessors is created. This constructor contains an
1824 optimization: If all predecessors of the Phi node are identical it
1825 returns the only operand instead of a new Phi node. */
1826 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
1828 /* In case we allocated a Phi0 node at the beginning of this procedure,
1829 we need to exchange this Phi0 with the real Phi. */
1831 exchange(phi0, res);
1832 block->attr.block.graph_arr[pos] = res;
1833 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1834 only an optimization. */
1840 /* This function returns the last definition of a variable. In case
1841 this variable was last defined in a previous block, Phi nodes are
1842 inserted. If the part of the firm graph containing the definition
1843 is not yet constructed, a dummy Phi node is returned. */
1845 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1848 /* There are 4 cases to treat.
1850 1. The block is not mature and we visit it the first time. We can not
1851 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1852 predecessors is returned. This node is added to the linked list (field
1853 "link") of the containing block to be completed when this block is
1854 matured. (Comlpletion will add a new Phi and turn the Phi0 into an Id
1857 2. The value is already known in this block, graph_arr[pos] is set and we
1858 visit the block the first time. We can return the value without
1859 creating any new nodes.
1861 3. The block is mature and we visit it the first time. A Phi node needs
1862 to be created (phi_merge). If the Phi is not needed, as all it's
1863 operands are the same value reaching the block through different
1864 paths, it's optimized away and the value itself is returned.
1866 4. The block is mature, and we visit it the second time. Now two
1867 subcases are possible:
1868 * The value was computed completely the last time we were here. This
1869 is the case if there is no loop. We can return the proper value.
1870 * The recursion that visited this node and set the flag did not
1871 return yet. We are computing a value in a loop and need to
1872 break the recursion. This case only happens if we visited
1873 the same block with phi_merge before, which inserted a Phi0.
1874 So we return the Phi0.
1877 /* case 4 -- already visited. */
1878 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1879 /* As phi_merge allocates a Phi0 this value is always defined. Here
1880 is the critical difference of the two algorithms. */
1881 assert(block->attr.block.graph_arr[pos]);
1882 return block->attr.block.graph_arr[pos];
1885 /* visited the first time */
1886 set_irn_visited(block, get_irg_visited(current_ir_graph));
1888 /* Get the local valid value */
1889 res = block->attr.block.graph_arr[pos];
1891 /* case 2 -- If the value is actually computed, return it. */
1892 if (res) { return res; };
1894 if (block->attr.block.matured) { /* case 3 */
1896 /* The Phi has the same amount of ins as the corresponding block. */
1897 int ins = get_irn_arity(block);
1899 NEW_ARR_A (ir_node *, nin, ins);
1901 /* Phi merge collects the predecessors and then creates a node. */
1902 res = phi_merge (block, pos, mode, nin, ins);
1904 } else { /* case 1 */
1905 /* The block is not mature, we don't know how many in's are needed. A Phi
1906 with zero predecessors is created. Such a Phi node is called Phi0
1907 node. The Phi0 is then added to the list of Phi0 nodes in this block
1908 to be matured by mature_immBlock later.
1909 The Phi0 has to remember the pos of it's internal value. If the real
1910 Phi is computed, pos is used to update the array with the local
1912 res = new_rd_Phi0 (current_ir_graph, block, mode);
1913 res->attr.phi0_pos = pos;
1914 res->link = block->link;
1918 /* If we get here, the frontend missed a use-before-definition error */
1921 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1922 assert (mode->code >= irm_F && mode->code <= irm_P);
1923 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1924 get_mode_null(mode));
1927 /* The local valid value is available now. */
1928 block->attr.block.graph_arr[pos] = res;
1933 #endif /* USE_FAST_PHI_CONSTRUCTION */
1935 /* ************************************************************************** */
1937 /** Finalize a Block node, when all control flows are known. */
1938 /** Acceptable parameters are only Block nodes. */
1940 mature_immBlock (ir_node *block)
1947 assert (get_irn_opcode(block) == iro_Block);
1948 /* @@@ should be commented in
1949 assert (!get_Block_matured(block) && "Block already matured"); */
1951 if (!get_Block_matured(block)) {
1952 ins = ARR_LEN (block->in)-1;
1953 /* Fix block parameters */
1954 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1956 /* An array for building the Phi nodes. */
1957 NEW_ARR_A (ir_node *, nin, ins);
1959 /* Traverse a chain of Phi nodes attached to this block and mature
1961 for (n = block->link; n; n=next) {
1962 inc_irg_visited(current_ir_graph);
1964 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1967 block->attr.block.matured = 1;
1969 /* Now, as the block is a finished firm node, we can optimize it.
1970 Since other nodes have been allocated since the block was created
1971 we can not free the node on the obstack. Therefore we have to call
1973 Unfortunately the optimization does not change a lot, as all allocated
1974 nodes refer to the unoptimized node.
1975 We can call _2, as global cse has no effect on blocks. */
1976 block = optimize_in_place_2(block);
1977 IRN_VRFY_IRG(block, current_ir_graph);
1982 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1984 return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
1989 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1991 return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
1996 new_d_Const_long(dbg_info* db, ir_mode *mode, long value)
1998 return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value);
2002 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
2004 return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
2010 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
2012 return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
2019 return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
2024 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
2027 assert(arg->op == op_Cond);
2028 arg->attr.c.kind = fragmentary;
2029 arg->attr.c.default_proj = max_proj;
2030 res = new_Proj (arg, mode_X, max_proj);
2035 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
2037 return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
2042 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
2044 return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
2048 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
2050 return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
2055 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2057 return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
2062 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2064 return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
2070 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
2072 return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
2077 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2079 return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
2084 * allocate the frag array
2086 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2087 if (get_opt_precise_exc_context()) {
2088 if ((current_ir_graph->phase_state == phase_building) &&
2089 (get_irn_op(res) == op) && /* Could be optimized away. */
2090 !*frag_store) /* Could be a cse where the arr is already set. */ {
2091 *frag_store = new_frag_arr(res);
2098 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2101 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
2103 res->attr.except.pin_state = op_pin_state_pinned;
2104 #if PRECISE_EXC_CONTEXT
2105 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2112 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2115 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
2117 res->attr.except.pin_state = op_pin_state_pinned;
2118 #if PRECISE_EXC_CONTEXT
2119 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2126 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2129 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
2131 res->attr.except.pin_state = op_pin_state_pinned;
2132 #if PRECISE_EXC_CONTEXT
2133 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2140 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
2143 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
2145 res->attr.except.pin_state = op_pin_state_pinned;
2146 #if PRECISE_EXC_CONTEXT
2147 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2154 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2156 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
2161 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2163 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2168 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2170 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2175 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2177 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2182 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2184 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2189 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2191 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2196 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2198 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2203 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2205 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2210 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2212 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2217 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2219 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2224 new_d_Jmp (dbg_info* db)
2226 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2230 new_d_Cond (dbg_info* db, ir_node *c)
2232 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2236 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2240 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2241 store, callee, arity, in, tp);
2242 #if PRECISE_EXC_CONTEXT
2243 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2250 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2252 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2257 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2259 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2264 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
2267 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2269 #if PRECISE_EXC_CONTEXT
2270 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2277 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2280 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2282 #if PRECISE_EXC_CONTEXT
2283 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2290 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2294 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2295 store, size, alloc_type, where);
2296 #if PRECISE_EXC_CONTEXT
2297 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2304 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr,
2305 ir_node *size, type *free_type, where_alloc where)
2307 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2308 store, ptr, size, free_type, where);
2312 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2313 /* GL: objptr was called frame before. Frame was a bad choice for the name
2314 as the operand could as well be a pointer to a dynamic object. */
2316 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2317 store, objptr, 0, NULL, ent);
2321 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2323 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2324 store, objptr, n_index, index, sel);
2328 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2330 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2331 store, objptr, ent));
2335 new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
2337 return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
2342 new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
2344 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2349 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2351 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2359 return _new_d_Bad();
2363 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2365 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2370 new_d_Unknown (ir_mode *m)
2372 return new_rd_Unknown(current_ir_graph, m);
2376 new_d_CallBegin (dbg_info *db, ir_node *call)
2379 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2384 new_d_EndReg (dbg_info *db)
2387 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2392 new_d_EndExcept (dbg_info *db)
2395 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2400 new_d_Break (dbg_info *db)
2402 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2406 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2408 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2415 return _new_d_NoMem();
2419 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2420 ir_node *ir_true, ir_mode *mode) {
2421 return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block,
2422 sel, ir_false, ir_true, mode);
2425 /* ********************************************************************* */
2426 /* Comfortable interface with automatic Phi node construction. */
2427 /* (Uses also constructors of ?? interface, except new_Block.)        */
2428 /* ********************************************************************* */
2430 /* * Block construction **/
2431 /* immature Block without predecessors */
2432 ir_node *new_d_immBlock (dbg_info* db) {
2435 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2436 /* creates a new dynamic in-array as length of in is -1 */
2437 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2438 current_ir_graph->current_block = res;
2439 res->attr.block.matured = 0;
2440 res->attr.block.dead = 0;
2441 /* res->attr.block.exc = exc_normal; */
2442 /* res->attr.block.handler_entry = 0; */
2443 res->attr.block.irg = current_ir_graph;
2444 res->attr.block.backedge = NULL;
2445 res->attr.block.in_cg = NULL;
2446 res->attr.block.cg_backedge = NULL;
2447 set_Block_block_visited(res, 0);
2449 /* Create and initialize array for Phi-node construction. */
2450 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2451 current_ir_graph->n_loc);
2452 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2454 /* Immature block may not be optimized! */
2455 IRN_VRFY_IRG(res, current_ir_graph);
2461 new_immBlock (void) {
2462 return new_d_immBlock(NULL);
/* add an edge to a jmp/control flow node */
2467 add_immBlock_pred (ir_node *block, ir_node *jmp)
2469 if (block->attr.block.matured) {
2470 assert(0 && "Error: Block already matured!\n");
2473 assert(jmp != NULL);
2474 ARR_APP1(ir_node *, block->in, jmp);
2478 /* changing the current block */
2480 set_cur_block (ir_node *target)
2482 current_ir_graph->current_block = target;
2485 /* ************************ */
2486 /* parameter administration */
2488 /* get a value from the parameter array from the current block by its index */
2490 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2492 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2493 inc_irg_visited(current_ir_graph);
2495 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2497 /* get a value from the parameter array from the current block by its index */
2499 get_value (int pos, ir_mode *mode)
2501 return get_d_value(NULL, pos, mode);
2504 /* set a value at position pos in the parameter array from the current block */
2506 set_value (int pos, ir_node *value)
2508 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2509 assert(pos+1 < current_ir_graph->n_loc);
2510 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2513 /* get the current store */
2517 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2518 /* GL: one could call get_value instead */
2519 inc_irg_visited(current_ir_graph);
2520 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2523 /* set the current store */
2525 set_store (ir_node *store)
2527 /* GL: one could call set_value instead */
2528 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2529 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2533 keep_alive (ir_node *ka)
2535 add_End_keepalive(current_ir_graph->end, ka);
2538 /** Useful access routines **/
2539 /* Returns the current block of the current graph. To set the current
2540 block use set_cur_block. */
2541 ir_node *get_cur_block() {
2542 return get_irg_current_block(current_ir_graph);
2545 /* Returns the frame type of the current graph */
2546 type *get_cur_frame_type() {
2547 return get_irg_frame_type(current_ir_graph);
2551 /* ********************************************************************* */
2554 /* call once for each run of the library */
2556 init_cons(uninitialized_local_variable_func_t *func)
2558 default_initialize_local_variable = func;
2561 /* call for each graph */
2563 irg_finalize_cons (ir_graph *irg) {
2564 irg->phase_state = phase_high;
2568 irp_finalize_cons (void) {
2569 int i, n_irgs = get_irp_n_irgs();
2570 for (i = 0; i < n_irgs; i++) {
2571 irg_finalize_cons(get_irp_irg(i));
2573 irp->phase_state = phase_high;\
2579 ir_node *new_Block(int arity, ir_node **in) {
2580 return new_d_Block(NULL, arity, in);
2582 ir_node *new_Start (void) {
2583 return new_d_Start(NULL);
2585 ir_node *new_End (void) {
2586 return new_d_End(NULL);
2588 ir_node *new_Jmp (void) {
2589 return new_d_Jmp(NULL);
2591 ir_node *new_Cond (ir_node *c) {
2592 return new_d_Cond(NULL, c);
2594 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2595 return new_d_Return(NULL, store, arity, in);
2597 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2598 return new_d_Raise(NULL, store, obj);
2600 ir_node *new_Const (ir_mode *mode, tarval *con) {
2601 return new_d_Const(NULL, mode, con);
2604 ir_node *new_Const_long(ir_mode *mode, long value)
2606 return new_d_Const_long(NULL, mode, value);
2609 ir_node *new_Const_type(tarval *con, type *tp) {
2610 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2613 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
2614 return new_d_SymConst(NULL, value, kind);
2616 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2617 return new_d_simpleSel(NULL, store, objptr, ent);
2619 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2621 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2623 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2624 return new_d_InstOf (NULL, store, objptr, ent);
2626 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2628 return new_d_Call(NULL, store, callee, arity, in, tp);
2630 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2631 return new_d_Add(NULL, op1, op2, mode);
2633 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2634 return new_d_Sub(NULL, op1, op2, mode);
2636 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2637 return new_d_Minus(NULL, op, mode);
2639 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2640 return new_d_Mul(NULL, op1, op2, mode);
2642 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2643 return new_d_Quot(NULL, memop, op1, op2);
2645 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2646 return new_d_DivMod(NULL, memop, op1, op2);
2648 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2649 return new_d_Div(NULL, memop, op1, op2);
2651 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2652 return new_d_Mod(NULL, memop, op1, op2);
2654 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2655 return new_d_Abs(NULL, op, mode);
2657 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2658 return new_d_And(NULL, op1, op2, mode);
2660 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2661 return new_d_Or(NULL, op1, op2, mode);
2663 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2664 return new_d_Eor(NULL, op1, op2, mode);
2666 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2667 return new_d_Not(NULL, op, mode);
2669 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2670 return new_d_Shl(NULL, op, k, mode);
2672 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2673 return new_d_Shr(NULL, op, k, mode);
2675 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2676 return new_d_Shrs(NULL, op, k, mode);
2678 #define new_Rotate new_Rot
2679 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2680 return new_d_Rot(NULL, op, k, mode);
2682 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2683 return new_d_Cmp(NULL, op1, op2);
2685 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2686 return new_d_Conv(NULL, op, mode);
2688 ir_node *new_Cast (ir_node *op, type *to_tp) {
2689 return new_d_Cast(NULL, op, to_tp);
2691 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2692 return new_d_Phi(NULL, arity, in, mode);
2694 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
2695 return new_d_Load(NULL, store, addr, mode);
2697 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2698 return new_d_Store(NULL, store, addr, val);
2700 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2701 where_alloc where) {
2702 return new_d_Alloc(NULL, store, size, alloc_type, where);
2704 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2705 type *free_type, where_alloc where) {
2706 return new_d_Free(NULL, store, ptr, size, free_type, where);
2708 ir_node *new_Sync (int arity, ir_node **in) {
2709 return new_d_Sync(NULL, arity, in);
2711 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2712 return new_d_Proj(NULL, arg, mode, proj);
2714 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2715 return new_d_defaultProj(NULL, arg, max_proj);
2717 ir_node *new_Tuple (int arity, ir_node **in) {
2718 return new_d_Tuple(NULL, arity, in);
2720 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2721 return new_d_Id(NULL, val, mode);
2723 ir_node *new_Bad (void) {
2726 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2727 return new_d_Confirm (NULL, val, bound, cmp);
2729 ir_node *new_Unknown(ir_mode *m) {
2730 return new_d_Unknown(m);
2732 ir_node *new_CallBegin (ir_node *callee) {
2733 return new_d_CallBegin(NULL, callee);
2735 ir_node *new_EndReg (void) {
2736 return new_d_EndReg(NULL);
2738 ir_node *new_EndExcept (void) {
2739 return new_d_EndExcept(NULL);
2741 ir_node *new_Break (void) {
2742 return new_d_Break(NULL);
2744 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2745 return new_d_Filter(NULL, arg, mode, proj);
2747 ir_node *new_NoMem (void) {
2748 return new_d_NoMem();
2750 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
2751 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);