1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "common_t.h"
28 /* memset belongs to string.h */
31 /* # include "exc.h" */
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /*** ******************************************** */
44 /** privat interfaces, for professional use only */
46 /* Constructs a Block with a fixed number of predecessors.
47 Does not set current_block. Can not be used with automatic
48 Phi node construction. */
/* NOTE(review): this listing is line-sampled -- the return type, local
   declarations, braces and return statement of this constructor are
   not visible here. */
50 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Blocks carry mode_R; a block has no enclosing block, hence NULL. */
54 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
/* The arity is fixed at construction, so the block is created mature
   and with a cleared block-visited flag. */
55 set_Block_matured(res, 1);
56 set_Block_block_visited(res, 0);
/* Default exception attribute: normal control flow, no exception edge. */
58 res->attr.block.exc = exc_normal;
65 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
69 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
76 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
80 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
86 /* Creates a Phi node with all predecessors. Calling this constructor
87 is only allowed if the corresponding block is mature. */
89 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A mature block has a fixed predecessor count, so the Phi arity must
   match the block's arity exactly. */
93 assert( get_Block_matured(block) );
94 assert( get_irn_arity(block) == arity );
96 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
101 /* Memory Phis in endless loops must be kept alive.
102 As we can't distinguish these easily we keep all of them alive. */
/* NOTE(review): res->op is re-checked because an optimize() call
   (presumably on a line sampled out of this listing) may have replaced
   the Phi with another node -- TODO confirm against the full source. */
103 if ((res->op == op_Phi) && (mode == mode_M))
104 add_End_keepalive(irg->end, res);
/* Constructs a Const node for tarval `con' in the given mode.
   Const has no data predecessors (arity 0). */
109 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
112 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
/* NOTE(review): the line storing `con' into the node's attributes is
   missing from this sampled listing -- verify against the full file. */
114 res = optimize (res);
118 res = local_optimize_newby (res);
125 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
127 ir_node *in[1] = {val};
129 res = new_ir_node (irg, block, op_Id, mode, 1, in);
130 res = optimize (res);
136 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
139 ir_node *in[1] = {arg};
141 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
142 res->attr.proj = proj;
145 assert(get_Proj_pred(res));
146 assert(get_nodes_Block(get_Proj_pred(res)));
148 res = optimize (res);
/* Constructs the default Proj (number max_proj) of a Cond node. */
156 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
/* Only Cond nodes whose selector has mode_I take a default Proj. */
160 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
/* Mark the Cond `fragmentary' -- presumably meaning its case list is
   not exhaustive -- and record max_proj as the default target. */
161 arg->attr.c.kind = fragmentary;
162 arg->attr.c.default_proj = max_proj;
/* The default Proj is a control-flow result, hence mode_X. */
163 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
168 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
170 ir_node *in[1] = {op};
172 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
173 res = optimize (res);
180 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
184 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
185 res = optimize (res);
191 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
192 ir_node *op1, ir_node *op2, ir_mode *mode)
194 ir_node *in[2] = {op1, op2};
196 res = new_ir_node (irg, block, op_Add, mode, 2, in);
197 res = optimize (res);
203 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
204 ir_node *op1, ir_node *op2, ir_mode *mode)
206 ir_node *in[2] = {op1, op2};
208 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
209 res = optimize (res);
215 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
216 ir_node *op, ir_mode *mode)
218 ir_node *in[1] = {op};
220 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
221 res = optimize (res);
227 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
228 ir_node *op1, ir_node *op2, ir_mode *mode)
230 ir_node *in[2] = {op1, op2};
232 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
233 res = optimize (res);
239 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
240 ir_node *memop, ir_node *op1, ir_node *op2)
242 ir_node *in[3] = {memop, op1, op2};
244 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
245 res = optimize (res);
251 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
252 ir_node *memop, ir_node *op1, ir_node *op2)
254 ir_node *in[3] = {memop, op1, op2};
256 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
257 res = optimize (res);
263 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
264 ir_node *memop, ir_node *op1, ir_node *op2)
266 ir_node *in[3] = {memop, op1, op2};
268 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
269 res = optimize (res);
275 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
276 ir_node *memop, ir_node *op1, ir_node *op2)
278 ir_node *in[3] = {memop, op1, op2};
280 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
281 res = optimize (res);
287 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
290 ir_node *in[2] = {op1, op2};
292 res = new_ir_node (irg, block, op_And, mode, 2, in);
293 res = optimize (res);
299 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
300 ir_node *op1, ir_node *op2, ir_mode *mode)
302 ir_node *in[2] = {op1, op2};
304 res = new_ir_node (irg, block, op_Or, mode, 2, in);
305 res = optimize (res);
311 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
312 ir_node *op1, ir_node *op2, ir_mode *mode)
314 ir_node *in[2] = {op1, op2};
316 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
317 res = optimize (res);
323 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
324 ir_node *op, ir_mode *mode)
326 ir_node *in[1] = {op};
328 res = new_ir_node (irg, block, op_Not, mode, 1, in);
329 res = optimize (res);
335 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
336 ir_node *op, ir_node *k, ir_mode *mode)
338 ir_node *in[2] = {op, k};
340 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
341 res = optimize (res);
347 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
348 ir_node *op, ir_node *k, ir_mode *mode)
350 ir_node *in[2] = {op, k};
352 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
353 res = optimize (res);
359 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
360 ir_node *op, ir_node *k, ir_mode *mode)
362 ir_node *in[2] = {op, k};
364 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
365 res = optimize (res);
371 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
372 ir_node *op, ir_node *k, ir_mode *mode)
374 ir_node *in[2] = {op, k};
376 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
377 res = optimize (res);
383 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
384 ir_node *op, ir_mode *mode)
386 ir_node *in[1] = {op};
388 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
389 res = optimize (res);
395 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
396 ir_node *op1, ir_node *op2)
398 ir_node *in[2] = {op1, op2};
400 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
401 res = optimize (res);
407 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
411 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
412 res = optimize (res);
/* Constructs a Cond node over selector `c'. The result is a tuple
   (mode_T) of control-flow Projs. */
418 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
420 ir_node *in[1] = {c};
422 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
/* A fresh Cond starts as kind `dense' with default projection 0;
   new_rd_defaultProj may later switch it to `fragmentary'. */
423 res->attr.c.kind = dense;
424 res->attr.c.default_proj = 0;
425 res = optimize (res);
/* Constructs a Call node: store + callee + `arity' arguments, typed
   with the callee's method type. */
431 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
432 ir_node *callee, int arity, ir_node **in, type *type)
/* r_in reserves two leading slots before the arguments -- presumably
   store and callee; the assignments to r_in[0]/r_in[1] are on lines
   sampled out of this listing. */
439 NEW_ARR_A (ir_node *, r_in, r_arity);
442 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
444 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
/* Calls may only carry method types. */
446 assert(is_method_type(type));
447 set_Call_type(res, type);
448 res = optimize (res);
454 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
455 ir_node *store, int arity, ir_node **in)
462 NEW_ARR_A (ir_node *, r_in, r_arity);
464 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
465 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
466 res = optimize (res);
472 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
474 ir_node *in[2] = {store, obj};
476 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
477 res = optimize (res);
483 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
484 ir_node *store, ir_node *adr)
486 ir_node *in[2] = {store, adr};
488 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
490 res = optimize (res);
496 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
497 ir_node *store, ir_node *adr, ir_node *val)
499 ir_node *in[3] = {store, adr, val};
501 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
503 res = optimize (res);
510 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
511 ir_node *size, type *alloc_type, where_alloc where)
513 ir_node *in[2] = {store, size};
515 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
517 res->attr.a.where = where;
518 res->attr.a.type = alloc_type;
520 res = optimize (res);
526 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
527 ir_node *ptr, ir_node *size, type *free_type)
529 ir_node *in[3] = {store, ptr, size};
531 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
533 res->attr.f = free_type;
535 res = optimize (res);
541 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
542 int arity, ir_node **in, entity *ent)
549 NEW_ARR_A (ir_node *, r_in, r_arity);
552 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
553 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
555 res->attr.s.ltyp = static_linkage;
556 res->attr.s.ent = ent;
558 res = optimize (res);
/* Constructs a SymConst. `value' is interpreted according to
   `symkind': an ident for linkage_ptr_info, a type for type_tag/size.
   SymConst has no predecessors (arity 0). */
564 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
565 symconst_kind symkind)
/* NOTE(review): the branch selecting `mode' for the two symkind
   families is partly sampled out of this listing. */
570 if (symkind == linkage_ptr_info)
574 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
/* attr.i.tori is a union keyed by attr.i.num (= symkind). */
576 res->attr.i.num = symkind;
577 if (symkind == linkage_ptr_info) {
578 res->attr.i.tori.ptrinfo = (ident *)value;
580 assert ( ( (symkind == type_tag)
581 || (symkind == size))
582 && (is_type(value)));
583 res->attr.i.tori.typ = (type *)value;
585 res = optimize (res);
591 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
595 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
597 res = optimize (res);
605 return current_ir_graph->bad;
608 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
609 return new_rd_Block(NULL, irg, arity, in);
611 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
612 return new_rd_Start(NULL, irg, block);
614 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
615 return new_rd_End(NULL, irg, block);
617 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
618 return new_rd_Jmp(NULL, irg, block);
620 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
621 return new_rd_Cond(NULL, irg, block, c);
623 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
624 ir_node *store, int arity, ir_node **in) {
625 return new_rd_Return(NULL, irg, block, store, arity, in);
627 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
628 ir_node *store, ir_node *obj) {
629 return new_rd_Raise(NULL, irg, block, store, obj);
631 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
632 ir_mode *mode, tarval *con) {
633 return new_rd_Const(NULL, irg, block, mode, con);
635 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
636 type_or_id_p value, symconst_kind symkind) {
637 return new_rd_SymConst(NULL, irg, block, value, symkind);
639 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
640 ir_node *objptr, int n_index, ir_node **index,
642 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
644 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
645 ir_node *callee, int arity, ir_node **in,
647 return new_rd_Call(NULL, irg, block, store, callee, arity, in, type);
649 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
650 ir_node *op1, ir_node *op2, ir_mode *mode) {
651 return new_rd_Add(NULL, irg, block, op1, op2, mode);
653 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
654 ir_node *op1, ir_node *op2, ir_mode *mode) {
655 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
657 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
658 ir_node *op, ir_mode *mode) {
659 return new_rd_Minus(NULL, irg, block, op, mode);
661 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
662 ir_node *op1, ir_node *op2, ir_mode *mode) {
663 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
665 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
666 ir_node *memop, ir_node *op1, ir_node *op2) {
667 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
669 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
670 ir_node *memop, ir_node *op1, ir_node *op2) {
671 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
673 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
674 ir_node *memop, ir_node *op1, ir_node *op2) {
675 return new_rd_Div(NULL, irg, block, memop, op1, op2);
677 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
678 ir_node *memop, ir_node *op1, ir_node *op2) {
679 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
681 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
682 ir_node *op, ir_mode *mode) {
683 return new_rd_Abs(NULL, irg, block, op, mode);
685 ir_node *new_r_And (ir_graph *irg, ir_node *block,
686 ir_node *op1, ir_node *op2, ir_mode *mode) {
687 return new_rd_And(NULL, irg, block, op1, op2, mode);
689 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
690 ir_node *op1, ir_node *op2, ir_mode *mode) {
691 return new_rd_Or(NULL, irg, block, op1, op2, mode);
693 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
694 ir_node *op1, ir_node *op2, ir_mode *mode) {
695 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
697 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
698 ir_node *op, ir_mode *mode) {
699 return new_rd_Not(NULL, irg, block, op, mode);
701 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
702 ir_node *op1, ir_node *op2) {
703 return new_rd_Cmp(NULL, irg, block, op1, op2);
705 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
706 ir_node *op, ir_node *k, ir_mode *mode) {
707 return new_rd_Shl(NULL, irg, block, op, k, mode);
709 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
710 ir_node *op, ir_node *k, ir_mode *mode) {
711 return new_rd_Shr(NULL, irg, block, op, k, mode);
713 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
714 ir_node *op, ir_node *k, ir_mode *mode) {
715 return new_rd_Shrs(NULL, irg, block, op, k, mode);
717 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
718 ir_node *op, ir_node *k, ir_mode *mode) {
719 return new_rd_Rot(NULL, irg, block, op, k, mode);
721 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
722 ir_node *op, ir_mode *mode) {
723 return new_rd_Conv(NULL, irg, block, op, mode);
725 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
726 ir_node **in, ir_mode *mode) {
727 return new_rd_Phi(NULL, irg, block, arity, in, mode);
729 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
730 ir_node *store, ir_node *adr) {
731 return new_rd_Load(NULL, irg, block, store, adr);
733 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
734 ir_node *store, ir_node *adr, ir_node *val) {
735 return new_rd_Store(NULL, irg, block, store, adr, val);
737 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
738 ir_node *size, type *alloc_type, where_alloc where) {
739 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
741 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
742 ir_node *ptr, ir_node *size, type *free_type) {
743 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
745 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
746 return new_rd_Sync(NULL, irg, block, arity, in);
748 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
749 ir_mode *mode, long proj) {
750 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
752 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
754 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
756 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
757 int arity, ir_node **in) {
758 return new_rd_Tuple(NULL, irg, block, arity, in );
760 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
761 ir_node *val, ir_mode *mode) {
762 return new_rd_Id(NULL, irg, block, val, mode);
764 ir_node *new_r_Bad () {
768 /** ********************/
769 /** public interfaces */
770 /** construction tools */
772 /****f* ircons/new_Start
775 * new_Start -- create a new Start node in the current block
778 * s = new_Start(void);
779 * ir_node* new_Start(void);
782 * s - pointer to the created Start node
787 new_d_Start (dbg_info* db)
791 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
792 op_Start, mode_T, 0, NULL);
794 res = optimize (res);
800 new_d_End (dbg_info* db)
803 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
804 op_End, mode_X, -1, NULL);
805 res = optimize (res);
811 /* Constructs a Block with a fixed number of predecessors.
812 Does set current_block. Can be used with automatic Phi
813 node construction. */
815 new_d_Block (dbg_info* db, int arity, ir_node **in)
819 res = new_rd_Block (db, current_ir_graph, arity, in);
821 /* Create and initialize array for Phi-node construction. */
822 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
823 current_ir_graph->n_loc);
824 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
826 res = optimize (res);
827 current_ir_graph->current_block = res;
834 /* ***********************************************************************/
835 /* Methods necessary for automatic Phi node creation */
837 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
838 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
839 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
840 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
842 Call Graph: ( A ---> B == A "calls" B)
844 get_value mature_block
852 get_r_value_internal |
856 new_rd_Phi0 new_rd_Phi_in
858 * *************************************************************************** */
860 /* Creates a Phi node with 0 predecessors */
862 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
865 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
870 /* There are two implementations of the Phi node construction. The first
871 is faster, but does not work for blocks with more than 2 predecessors.
872 The second works always but is slower and causes more unnecessary Phi
874 Select the implementations by the following preprocessor flag set in
876 #if USE_FAST_PHI_CONSTRUCTION
878 /* This is a stack used for allocating and deallocating nodes in
879 new_rd_Phi_in. The original implementation used the obstack
880 to model this stack, now it is explicit. This reduces side effects.
882 #if USE_EXPLICIT_PHI_IN_STACK
887 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
889 res->stack = NEW_ARR_F (ir_node *, 1);
896 free_Phi_in_stack(Phi_in_stack *s) {
901 void free_to_Phi_in_stack(ir_node *phi) {
902 assert(get_irn_opcode(phi) == iro_Phi);
904 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
905 current_ir_graph->Phi_in_stack->pos)
906 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
908 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
910 (current_ir_graph->Phi_in_stack->pos)++;
914 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
915 int arity, ir_node **in) {
917 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
918 int pos = current_ir_graph->Phi_in_stack->pos;
922 /* We need to allocate a new node */
923 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
925 /* reuse the old node and initialize it again. */
928 assert (res->kind == k_ir_node);
929 assert (res->op == op_Phi);
934 /* ???!!! How to free the old in array?? */
935 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
937 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
939 (current_ir_graph->Phi_in_stack->pos)--;
943 #endif /* USE_EXPLICIT_PHI_IN_STACK */
945 /* Creates a Phi node with a given, fixed array **in of predecessors.
946 If the Phi node is unnecessary, as the same value reaches the block
947 through all control flow paths, it is eliminated and the value
948 returned directly. This constructor is only intended for use in
949 the automatic Phi node generation triggered by get_value or mature.
950 The implementation is quite tricky and depends on the fact, that
951 the nodes are allocated on a stack:
952 The in array contains predecessors and NULLs. The NULLs appear,
953 if get_r_value_internal, that computed the predecessors, reached
954 the same block on two paths. In this case the same value reaches
955 this block on both paths, there is no definition in between. We need
956 not allocate a Phi where these path's merge, but we have to communicate
957 this fact to the caller. This happens by returning a pointer to the
958 node the caller _will_ allocate. (Yes, we predict the address. We can
959 do so because the nodes are allocated on the obstack.) The caller then
960 finds a pointer to itself and, when this routine is called again,
964 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
965 ir_node **in, int ins)
968 ir_node *res, *known;
970 /* allocate a new node on the obstack.
971 This can return a node to which some of the pointers in the in-array
973 Attention: the constructor copies the in array, i.e., the later changes
974 to the array in this routine do not affect the constructed node! If
975 the in array contains NULLs, there will be missing predecessors in the
977 Is this a possible internal state of the Phi node generation? */
978 #if USE_EXPLICIT_PHI_IN_STACK
979 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
981 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
983 /* The in-array can contain NULLs. These were returned by
984 get_r_value_internal if it reached the same block/definition on a
986 The NULLs are replaced by the node itself to simplify the test in the
988 for (i=0; i < ins; ++i)
989 if (in[i] == NULL) in[i] = res;
991 /* This loop checks whether the Phi has more than one predecessor.
992 If so, it is a real Phi node and we break the loop. Else the
993 Phi node merges the same definition on several paths and therefore
995 for (i=0; i < ins; ++i)
997 if (in[i]==res || in[i]==known) continue;
1005 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1007 #if USE_EXPLICIT_PHI_IN_STACK
1008 free_to_Phi_in_stack(res);
1010 obstack_free (current_ir_graph->obst, res);
1014 res = optimize (res);
1018 /* return the pointer to the Phi node. This node might be deallocated! */
1023 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1025 /** This function computes the predecessors for a real Phi node, and then
1026 allocates and returns this node. The routine called to allocate the
1027 node might optimize it away and return a real value, or even a pointer
1028 to a deallocated Phi node on top of the obstack!
1029 This function is called with an in-array of proper size. **/
1030 static inline ir_node *
1031 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1033 ir_node *prevBlock, *res;
1036 /* This loop goes to all predecessor blocks of the block the Phi node is in
1037 and there finds the operands of the Phi node by calling
1038 get_r_value_internal. */
1039 for (i = 1; i <= ins; ++i) {
1040 assert (block->in[i]);
1041 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1043 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1046 /* After collecting all predecessors into the array nin a new Phi node
1047 with these predecessors is created. This constructor contains an
1048 optimization: If all predecessors of the Phi node are identical it
1049 returns the only operand instead of a new Phi node. If the value
1050 passes two different control flow edges without being defined, and
1051 this is the second path treated, a pointer to the node that will be
1052 allocated for the first path (recursion) is returned. We already
1053 know the address of this node, as it is the next node to be allocated
1054 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1055 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1057 /* Now we now the value for "pos" and can enter it in the array with
1058 all known local variables. Attention: this might be a pointer to
1059 a node, that later will be allocated!!! See new_rd_Phi_in.
1060 If this is called in mature, after some set_value in the same block,
1061 the proper value must not be overwritten:
1063 get_value (makes Phi0, put's it into graph_arr)
1064 set_value (overwrites Phi0 in graph_arr)
1065 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1068 if (!block->attr.block.graph_arr[pos]) {
1069 block->attr.block.graph_arr[pos] = res;
1071 /* printf(" value already computed by %s\n",
1072 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1078 /* This function returns the last definition of a variable. In case
1079 this variable was last defined in a previous block, Phi nodes are
1080 inserted. If the part of the firm graph containing the definition
1081 is not yet constructed, a dummy Phi node is returned. */
1083 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1086 /* There are 4 cases to treat.
1088 1. The block is not mature and we visit it the first time. We can not
1089 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1090 predecessors is returned. This node is added to the linked list (field
1091 "link") of the containing block to be completed when this block is
1092 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1095 2. The value is already known in this block, graph_arr[pos] is set and we
1096 visit the block the first time. We can return the value without
1097 creating any new nodes.
1099 3. The block is mature and we visit it the first time. A Phi node needs
1100 to be created (phi_merge). If the Phi is not needed, as all it's
1101 operands are the same value reaching the block through different
1102 paths, it's optimized away and the value itself is returned.
1104 4. The block is mature, and we visit it the second time. Now two
1105 subcases are possible:
1106 * The value was computed completely the last time we were here. This
1107 is the case if there is no loop. We can return the proper value.
1108 * The recursion that visited this node and set the flag did not
1109 return yet. We are computing a value in a loop and need to
1110 break the recursion without knowing the result yet.
1111 @@@ strange case. Straight forward we would create a Phi before
1112 starting the computation of it's predecessors. In this case we will
1113 find a Phi here in any case. The problem is that this implementation
1114 only creates a Phi after computing the predecessors, so that it is
1115 hard to compute self references of this Phi. @@@
1116 There is no simple check for the second subcase. Therefore we check
1117 for a second visit and treat all such cases as the second subcase.
1118 Anyways, the basic situation is the same: we reached a block
1119 on two paths without finding a definition of the value: No Phi
1120 nodes are needed on both paths.
1121 We return this information "Two paths, no Phi needed" by a very tricky
1122 implementation that relies on the fact that an obstack is a stack and
1123 will return a node with the same address on different allocations.
1124 Look also at phi_merge and new_rd_phi_in to understand this.
1125 @@@ Unfortunately this does not work, see testprogram
1126 three_cfpred_example.
1130 /* case 4 -- already visited. */
1131 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1133 /* visited the first time */
1134 set_irn_visited(block, get_irg_visited(current_ir_graph));
1136 /* Get the local valid value */
1137 res = block->attr.block.graph_arr[pos];
1139 /* case 2 -- If the value is actually computed, return it. */
1140 if (res) { return res;};
1142 if (block->attr.block.matured) { /* case 3 */
1144 /* The Phi has the same amount of ins as the corresponding block. */
1145 int ins = get_irn_arity(block);
1147 NEW_ARR_A (ir_node *, nin, ins);
1149 /* Phi merge collects the predecessors and then creates a node. */
1150 res = phi_merge (block, pos, mode, nin, ins);
1152 } else { /* case 1 */
1153 /* The block is not mature, we don't know how many in's are needed. A Phi
1154 with zero predecessors is created. Such a Phi node is called Phi0
1155 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1156 to the list of Phi0 nodes in this block to be matured by mature_block
1158 The Phi0 has to remember the pos of it's internal value. If the real
1159 Phi is computed, pos is used to update the array with the local
1162 res = new_rd_Phi0 (current_ir_graph, block, mode);
1163 res->attr.phi0_pos = pos;
1164 res->link = block->link;
1168 /* If we get here, the frontend missed a use-before-definition error */
1171 printf("Error: no value set. Use of undefined variable. Initializing
1173 assert (mode->code >= irm_f && mode->code <= irm_p);
1174 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1175 tarval_mode_null[mode->code]);
1178 /* The local valid value is available now. */
1179 block->attr.block.graph_arr[pos] = res;
1186 /** This is the simple algorithm. If first generates a Phi0, then
1187 it starts the recursion. This causes an Id at the entry of
1188 every block that has no definition of the value! **/
1190 #if USE_EXPLICIT_PHI_IN_STACK
1192 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1193 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Simple-algorithm variant of new_rd_Phi_in: builds a Phi over `in'
   and removes it again if it merges only one real value. */
1197 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1198 ir_node **in, int ins)
1201 ir_node *res, *known;
1203 /* Allocate a new node on the obstack. The allocation copies the in
1205 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1207 /* This loop checks whether the Phi has more than one predecessor.
1208 If so, it is a real Phi node and we break the loop. Else the
1209 Phi node merges the same definition on several paths and therefore
1210 is not needed. Don't consider Bad nodes! */
1212 for (i=0; i < ins; ++i)
/* Self-references, the single already-known value, and Bad inputs do
   not count as distinct predecessors. */
1216 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1224 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The Phi was the top obstack allocation, so it can be popped off. */
1227 obstack_free (current_ir_graph->obst, res);
1230 /* An undefined value, e.g., in unreachable code. */
1234 res = optimize (res);
1236 /* Memory Phis in endless loops must be kept alive.
1237 As we can't distinguish these easily we keep all of them alive. */
1238 if ((res->op == op_Phi) && (mode == mode_M))
1239 add_End_keepalive(irg->end, res);
1246 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1248 #if PRECISE_EXC_CONTEXT
1249 static inline ir_node *
1250 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocates a frag array for the fragile op n: a snapshot of the
   current block's local-value array (graph_arr), so exception paths
   can presumably see the values valid before the op -- see
   get_r_frag_value_internal. */
1253 new_frag_arr (ir_node *n) {
1256 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1257 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1258 sizeof(ir_node *)*current_ir_graph->n_loc);
1259 /* turn off optimization before allocating Proj nodes, as res isn't
1261 opt = get_optimize(); set_optimize(0);
1262 /* Here we rely on the fact that all frag ops have Memory as first result! */
1263 if (get_irn_op(n) == op_Call)
1264 arr[0] = new_Proj(n, mode_M, 3);
1266 arr[0] = new_Proj(n, mode_M, 0);
/* Record the most recent fragile op in the last slot so that later
   set_frag_value calls can propagate into its frag array. */
1268 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the frag array of a fragile op; where it lives in the attr
   union depends on the opcode. */
1273 get_frag_arr (ir_node *n) {
1274 if (get_irn_op(n) == op_Call) {
1275 return n->attr.call.frag_arr;
1276 } else if (get_irn_op(n) == op_Alloc) {
1277 return n->attr.a.frag_arr;
/* all remaining fragile ops keep the array directly in attr */
1279 return n->attr.frag_arr;
/* Stores val at pos in frag_arr, but never overwrites an existing
   entry; then recurses into the frag array of the fragile op cached
   in slot n_loc-1 (see new_frag_arr), if any. */
1284 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1285 if (!frag_arr[pos]) frag_arr[pos] = val;
1286 if (frag_arr[current_ir_graph->n_loc - 1])
1287 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value of variable pos as valid after the fragile op
   cfOp in block, building a Phi (or queuing a Phi0) if needed. */
1291 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1296 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1298 frag_arr = get_frag_arr(cfOp);
1299 res = frag_arr[pos];
1301 if (block->attr.block.graph_arr[pos]) {
1302 /* There was a set_value after the cfOp and no get_value before that
1303 set_value. We must build a Phi node now. */
1304 if (block->attr.block.matured) {
1305 int ins = get_irn_arity(block);
1307 NEW_ARR_A (ir_node *, nin, ins);
1308 res = phi_merge(block, pos, mode, nin, ins);
/* block not yet matured: create a Phi0 and chain it on the block's
   link list to be completed later by mature_block */
1310 res = new_rd_Phi0 (current_ir_graph, block, mode);
1311 res->attr.phi0_pos = pos;
1312 res->link = block->link;
1316 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1317 but this should be better: (remove comment if this works) */
1318 /* It's a Phi, we can write this into all graph_arrs with NULL */
1319 set_frag_value(block->attr.block.graph_arr, pos, res);
1321 res = get_r_value_internal(block, pos, mode);
1322 set_frag_value(block->attr.block.graph_arr, pos, res);
1329 /** This function allocates a dummy Phi node to break recursions,
1330 computes the predecessors for the real phi node, and then
1331 allocates and returns this node. The routine called to allocate the
1332 node might optimize it away and return a real value.
1333 This function is called with an in-array of proper size. **/
1334 static inline ir_node *
1335 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1337 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1340 /* If this block has no value at pos create a Phi0 and remember it
1341 in graph_arr to break recursions.
1342 Else we may not set graph_arr as a later value is remembered there. */
1344 if (!block->attr.block.graph_arr[pos]) {
1345 /* This is commented out as collapsing to Bads is no good idea.
1346 Either we need an assert here, or we need to call a routine
1347 that deals with this case as appropriate for the given language.
1348 Right now a self referencing Id is created which will crash irg_vrfy().
1350 Even if all variables are defined before use, it can happen that
1351 we get to the start block, if a cond has been replaced by a tuple
1352 (bad, jmp). As the start has a self referencing control flow edge,
1353 we get a self referencing Id, which is hard to optimize away. We avoid
1354 this by defining the value as a Bad node.
1355 Returning a const with tarval_bad is a preliminary solution. In some
1356 situations we might want a Warning or an Error. */
1358 if (block == get_irg_start_block(current_ir_graph)) {
1359 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1360 /* We don't need to care about exception ops in the start block.
1361 There are none by definition. */
1362 return block->attr.block.graph_arr[pos];
/* place a Phi0 as recursion breaker */
1364 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1365 block->attr.block.graph_arr[pos] = phi0;
1366 #if PRECISE_EXC_CONTEXT
1367 /* Set graph_arr for fragile ops. Also here we should break recursion.
1368 We could choose a cyclic path through an cfop. But the recursion would
1369 break at some point. */
1370 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1375 /* This loop goes to all predecessor blocks of the block the Phi node
1376 is in and there finds the operands of the Phi node by calling
1377 get_r_value_internal. */
/* in[0] is the block's own slot; control-flow predecessors are in[1..ins] */
1378 for (i = 1; i <= ins; ++i) {
1379 prevCfOp = skip_Proj(block->in[i]);
1381 if (is_Bad(prevCfOp)) {
1382 /* In case a Cond has been optimized we would get right to the start block
1383 with an invalid definition. */
1384 nin[i-1] = new_Bad();
1387 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1389 if (!is_Bad(prevBlock)) {
1390 #if PRECISE_EXC_CONTEXT
1391 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1392 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
/* note: get_r_frag_value_internal runs twice when asserts are enabled */
1393 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1396 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1398 nin[i-1] = new_Bad();
1402 /* After collecting all predecessors into the array nin a new Phi node
1403 with these predecessors is created. This constructor contains an
1404 optimization: If all predecessors of the Phi node are identical it
1405 returns the only operand instead of a new Phi node. */
1406 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1408 /* In case we allocated a Phi0 node at the beginning of this procedure,
1409 we need to exchange this Phi0 with the real Phi. */
1411 exchange(phi0, res);
1412 block->attr.block.graph_arr[pos] = res;
1413 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1414 only an optimization. */
1420 /* This function returns the last definition of a variable. In case
1421 this variable was last defined in a previous block, Phi nodes are
1422 inserted. If the part of the firm graph containing the definition
1423 is not yet constructed, a dummy Phi node is returned. */
1425 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1428 /* There are 4 cases to treat.
1430 1. The block is not mature and we visit it the first time. We can not
1431 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1432 predecessors is returned. This node is added to the linked list (field
1433 "link") of the containing block to be completed when this block is
1434 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1437 2. The value is already known in this block, graph_arr[pos] is set and we
1438 visit the block the first time. We can return the value without
1439 creating any new nodes.
1441 3. The block is mature and we visit it the first time. A Phi node needs
1442 to be created (phi_merge). If the Phi is not needed, as all its
1443 operands are the same value reaching the block through different
1444 paths, it's optimized away and the value itself is returned.
1446 4. The block is mature, and we visit it the second time. Now two
1447 subcases are possible:
1448 * The value was computed completely the last time we were here. This
1449 is the case if there is no loop. We can return the proper value.
1450 * The recursion that visited this node and set the flag did not
1451 return yet. We are computing a value in a loop and need to
1452 break the recursion. This case only happens if we visited
1453 the same block with phi_merge before, which inserted a Phi0.
1454 So we return the Phi0.
1457 /* case 4 -- already visited. */
1458 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1459 /* As phi_merge allocates a Phi0 this value is always defined. Here
1460 is the critical difference of the two algorithms. */
1461 assert(block->attr.block.graph_arr[pos]);
1462 return block->attr.block.graph_arr[pos];
1465 /* visited the first time */
1466 set_irn_visited(block, get_irg_visited(current_ir_graph));
1468 /* Get the local valid value */
1469 res = block->attr.block.graph_arr[pos];
1471 /* case 2 -- If the value is actually computed, return it. */
1472 if (res) { return res; };
1474 if (block->attr.block.matured) { /* case 3 */
1476 /* The Phi has the same amount of ins as the corresponding block. */
1477 int ins = get_irn_arity(block);
1479 NEW_ARR_A (ir_node *, nin, ins);
1481 /* Phi merge collects the predecessors and then creates a node. */
1482 res = phi_merge (block, pos, mode, nin, ins);
1484 } else { /* case 1 */
1485 /* The block is not mature, we don't know how many in's are needed. A Phi
1486 with zero predecessors is created. Such a Phi node is called Phi0
1487 node. The Phi0 is then added to the list of Phi0 nodes in this block
1488 to be matured by mature_block later.
1489 The Phi0 has to remember the pos of it's internal value. If the real
1490 Phi is computed, pos is used to update the array with the local
1492 res = new_rd_Phi0 (current_ir_graph, block, mode);
1493 res->attr.phi0_pos = pos;
1494 res->link = block->link;
1498 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the string literal below is not terminated on this
   line; it continues on a following (elided) line.  An unescaped line
   break inside a string literal is not valid C -- verify this compiles. */
1501 printf("Error: no value set. Use of undefined variable. Initializing
1503 assert (mode->code >= irm_f && mode->code <= irm_p);
/* fall back to the mode's zero value so construction can continue */
1504 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1505 tarval_mode_null[mode->code]);
1508 /* The local valid value is available now. */
1509 block->attr.block.graph_arr[pos] = res;
1514 #endif /* USE_FAST_PHI_CONSTRUCTION */
1516 /* ************************************************************************** */
1518 /** Finalize a Block node, when all control flows are known. */
1519 /** Acceptable parameters are only Block nodes. */
1521 mature_block (ir_node *block)
1528 assert (get_irn_opcode(block) == iro_Block);
1529 /* assert (!get_Block_matured(block) && "Block already matured"); */
/* Maturing twice is tolerated: the guard below makes it a no-op. */
1531 if (!get_Block_matured(block)) {
1533 /* An array for building the Phi nodes. */
/* in[0] is the block's own slot; only in[1..] are CF predecessors. */
1534 ins = ARR_LEN (block->in)-1;
1535 NEW_ARR_A (ir_node *, nin, ins);
1536 /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
/* Complete each queued Phi0 (chained via ->link) with a real Phi;
   'next' is presumably saved before the exchange -- elided here. */
1538 /* Traverse a chain of Phi nodes attached to this block and mature
1540 for (n = block->link; n; n=next) {
/* each phi_merge walk needs a fresh visited flag */
1541 inc_irg_visited(current_ir_graph);
1543 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1546 block->attr.block.matured = 1;
1548 /* Now, as the block is a finished firm node, we can optimize it.
1549 Since other nodes have been allocated since the block was created
1550 we can not free the node on the obstack. Therefore we have to call
1552 Unfortunately the optimization does not change a lot, as all allocated
1553 nodes refer to the unoptimized node.
1554 We can call _2, as global cse has no effect on blocks. */
1555 block = optimize_in_place_2(block);
/* Phi constructor without explicit block: placed in the current block. */
1561 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1563 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Constants are placed in the start block (note: start_block, not
   current_block) -- presumably so they dominate every use. */
1568 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1570 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
/* Id / Proj / Conv / Tuple convenience constructors, all placed in the
   current block. */
1575 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1577 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1582 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1584 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* defaultProj: Proj for the default branch of a Cond whose selector
   has mode_I (enforced by the assert).  Marks the Cond 'fragmentary'
   and records the default proj number. */
1589 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1592 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1593 arg->attr.c.kind = fragmentary;
1594 arg->attr.c.default_proj = max_proj;
1595 res = new_Proj (arg, mode_X, max_proj);
1600 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1602 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1607 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1609 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
/* Arithmetic constructors: forward to the new_rd_* variant in the
   current block. */
1614 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1616 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1621 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1623 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1629 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1631 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1636 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1638 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
/* Division-family constructors.  These ops are fragile (may raise an
   exception), so while building with PRECISE_EXC_CONTEXT they get a
   frag array recording the local values valid before the op. */
1643 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1646 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1648 #if PRECISE_EXC_CONTEXT
1649 if ((current_ir_graph->phase_state == phase_building) &&
1650 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1651 res->attr.frag_arr = new_frag_arr(res);
1658 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1661 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1663 #if PRECISE_EXC_CONTEXT
1664 if ((current_ir_graph->phase_state == phase_building) &&
1665 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1666 res->attr.frag_arr = new_frag_arr(res);
1673 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1676 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1678 #if PRECISE_EXC_CONTEXT
1679 if ((current_ir_graph->phase_state == phase_building) &&
1680 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1681 res->attr.frag_arr = new_frag_arr(res);
1688 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1691 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1693 #if PRECISE_EXC_CONTEXT
1694 if ((current_ir_graph->phase_state == phase_building) &&
1695 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1696 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise / shift / compare / control-flow constructors: forward to
   the corresponding new_rd_* constructor in the current block. */
1703 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1705 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1710 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1712 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1717 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1719 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1724 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1726 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1731 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1733 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1738 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1740 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1745 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1747 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1752 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1754 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1759 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1761 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1766 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1768 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1773 new_d_Jmp (dbg_info* db)
1775 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1779 new_d_Cond (dbg_info* db, ir_node *c)
1781 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Call constructor.  Call is a fragile op: attach a frag array while
   building with PRECISE_EXC_CONTEXT (cf. new_frag_arr). */
1785 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1789 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1790 store, callee, arity, in, type);
1791 #if PRECISE_EXC_CONTEXT
1792 if ((current_ir_graph->phase_state == phase_building) &&
1793 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
/* Call keeps its frag array in attr.call (cf. get_frag_arr) */
1794 res->attr.call.frag_arr = new_frag_arr(res);
1801 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1803 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1808 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1810 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
/* Load/Store/Alloc are fragile ops: attach a frag array while
   building with PRECISE_EXC_CONTEXT (cf. new_frag_arr). */
1815 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1818 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1820 #if PRECISE_EXC_CONTEXT
1821 if ((current_ir_graph->phase_state == phase_building) &&
1822 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1823 res->attr.frag_arr = new_frag_arr(res);
1830 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1833 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1835 #if PRECISE_EXC_CONTEXT
1836 if ((current_ir_graph->phase_state == phase_building) &&
1837 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1838 res->attr.frag_arr = new_frag_arr(res);
1845 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1849 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1850 store, size, alloc_type, where);
1851 #if PRECISE_EXC_CONTEXT
1852 if ((current_ir_graph->phase_state == phase_building) &&
1853 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
/* Alloc keeps its frag array in attr.a (cf. get_frag_arr) */
1854 res->attr.a.frag_arr = new_frag_arr(res);
1861 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1863 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1864 store, ptr, size, free_type);
/* simpleSel: a Sel without index array (no array indexing). */
1868 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1869 /* GL: objptr was called frame before. Frame was a bad choice for the name
1870 as the operand could as well be a pointer to a dynamic object. */
1872 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1873 store, objptr, 0, NULL, ent);
1877 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1879 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1880 store, objptr, n_index, index, sel);
1884 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
1886 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
1891 new_d_Sync (dbg_info* db, int arity, ir_node** in)
1893 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Presumably the body of new_d_Bad (header elided in this view):
   returns the graph's single Bad node. */
1901 return current_ir_graph->bad;
1904 /* ********************************************************************* */
1905 /* Comfortable interface with automatic Phi node construction. */
1906 /* (Uses also constructors of ?? interface, except new_Block.) */
1907 /* ********************************************************************* */
1909 /** Block construction **/
1910 /* immature Block without predecessors */
/* Creates an immature (not yet matured) Block with a dynamic in-array
   and makes it the current block.  Predecessors are added later via
   add_in_edge; the block is finished with mature_block. */
1911 ir_node *new_d_immBlock (dbg_info* db) {
1914 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1915 /* creates a new dynamic in-array as length of in is -1 */
1916 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
1917 current_ir_graph->current_block = res;
1918 res->attr.block.matured = 0;
1919 res->attr.block.exc = exc_normal;
1920 set_Block_block_visited(res, 0);
1922 /* Create and initialize array for Phi-node construction. */
1923 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1924 current_ir_graph->n_loc);
1925 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1927 /* Immature block may not be optimized! */
/* new_immBlock: convenience wrapper without debug info. */
1935 return new_d_immBlock(NULL);
1938 /* add an edge to a jmp/control flow node */
1940 add_in_edge (ir_node *block, ir_node *jmp)
/* Predecessors may only be appended while the block is still immature. */
1942 if (block->attr.block.matured) {
1943 assert(0 && "Error: Block already matured!\n");
1946 assert (jmp != NULL);
1947 ARR_APP1 (ir_node *, block->in, jmp);
1951 /* changing the current block */
/* Makes target the block new nodes are added to.  No checks are
   performed on target here. */
1953 switch_block (ir_node *target)
1955 current_ir_graph->current_block = target;
1958 /* ************************ */
1959 /* parameter administration */
1961 /* get a value from the parameter array from the current block by its index */
1963 get_d_value (dbg_info* db, int pos, ir_mode *mode)
1965 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* fresh visited flag for the recursive walk in get_r_value_internal */
1966 inc_irg_visited(current_ir_graph);
/* pos is shifted by 1: slot 0 of graph_arr holds the store (memory) */
1968 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1970 /* get a value from the parameter array from the current block by its index */
1972 get_value (int pos, ir_mode *mode)
1974 return get_d_value(NULL, pos, mode);
1977 /* set a value at position pos in the parameter array from the current block */
1979 set_value (int pos, ir_node *value)
1981 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* pos + 1: slot 0 is reserved for the store (memory state) */
1982 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1985 /* get the current store */
/* The store (memory state) lives in slot 0 of the block's graph_arr. */
1989 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1990 /* GL: one could call get_value instead */
1991 inc_irg_visited(current_ir_graph);
1992 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1995 /* set the current store */
1997 set_store (ir_node *store)
1999 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2000 /* GL: one could call set_value instead */
2001 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds ka to the End node's keepalive edges so it is not removed as
   unreachable. */
2005 keep_alive (ir_node *ka)
2007 add_End_keepalive(current_ir_graph->end, ka);
2010 /** Useful access routines **/
2011 /* Returns the current block of the current graph. To set the current
2012 block use switch_block(). */
2013 ir_node *get_cur_block() {
2014 return get_irg_current_block(current_ir_graph);
2017 /* Returns the frame type of the current graph */
2018 type *get_cur_frame_type() {
2019 return get_irg_frame_type(current_ir_graph);
2023 /* ********************************************************************* */
2026 /* call once for each run of the library */
2032 /* call for each graph */
/* Marks the graph as completely constructed.  The Phi-construction
   helpers assert phase_building and must not be used afterwards. */
2034 finalize_cons (ir_graph *irg) {
2035 irg->phase_state = phase_high;
/* Convenience constructors without debug info: each forwards to its
   new_d_* counterpart with db == NULL. */
2039 ir_node *new_Block(int arity, ir_node **in) {
2040 return new_d_Block(NULL, arity, in);
2042 ir_node *new_Start (void) {
2043 return new_d_Start(NULL);
2045 ir_node *new_End (void) {
2046 return new_d_End(NULL);
2048 ir_node *new_Jmp (void) {
2049 return new_d_Jmp(NULL);
2051 ir_node *new_Cond (ir_node *c) {
2052 return new_d_Cond(NULL, c);
2054 ir_node *new_Return (ir_node *store, int arity, ir_node **in) {
2055 return new_d_Return(NULL, store, arity, in);
2057 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2058 return new_d_Raise(NULL, store, obj);
2060 ir_node *new_Const (ir_mode *mode, tarval *con) {
2061 return new_d_Const(NULL, mode, con);
2063 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2064 return new_d_SymConst(NULL, value, kind);
2066 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2067 return new_d_simpleSel(NULL, store, objptr, ent);
2069 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2071 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2073 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2075 return new_d_Call(NULL, store, callee, arity, in, type);
2077 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2078 return new_d_Add(NULL, op1, op2, mode);
2080 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2081 return new_d_Sub(NULL, op1, op2, mode);
2083 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2084 return new_d_Minus(NULL, op, mode);
2086 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2087 return new_d_Mul(NULL, op1, op2, mode);
2089 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2090 return new_d_Quot(NULL, memop, op1, op2);
2092 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2093 return new_d_DivMod(NULL, memop, op1, op2);
2095 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2096 return new_d_Div(NULL, memop, op1, op2);
2098 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2099 return new_d_Mod(NULL, memop, op1, op2);
2101 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2102 return new_d_Abs(NULL, op, mode);
2104 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2105 return new_d_And(NULL, op1, op2, mode);
2107 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2108 return new_d_Or(NULL, op1, op2, mode);
2110 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2111 return new_d_Eor(NULL, op1, op2, mode);
2113 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2114 return new_d_Not(NULL, op, mode);
2116 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2117 return new_d_Shl(NULL, op, k, mode);
2119 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2120 return new_d_Shr(NULL, op, k, mode);
2122 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2123 return new_d_Shrs(NULL, op, k, mode);
/* alias: new_Rotate is the historical name for new_Rot */
2125 #define new_Rotate new_Rot
2126 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2127 return new_d_Rot(NULL, op, k, mode);
2129 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2130 return new_d_Cmp(NULL, op1, op2);
2132 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2133 return new_d_Conv(NULL, op, mode);
2135 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2136 return new_d_Phi(NULL, arity, in, mode);
2138 ir_node *new_Load (ir_node *store, ir_node *addr) {
2139 return new_d_Load(NULL, store, addr);
2141 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2142 return new_d_Store(NULL, store, addr, val);
2144 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2145 where_alloc where) {
2146 return new_d_Alloc(NULL, store, size, alloc_type, where);
2148 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2150 return new_d_Free(NULL, store, ptr, size, free_type);
2152 ir_node *new_Sync (int arity, ir_node **in) {
2153 return new_d_Sync(NULL, arity, in);
2155 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2156 return new_d_Proj(NULL, arg, mode, proj);
2158 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2159 return new_d_defaultProj(NULL, arg, max_proj);
2161 ir_node *new_Tuple (int arity, ir_node **in) {
2162 return new_d_Tuple(NULL, arity, in);
2164 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2165 return new_d_Id(NULL, val, mode);
2167 ir_node *new_Bad (void) {