3 * File name: ir/ir/irgraph.c
4 * Purpose: Entry point to the representation of procedure code.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
25 # include "irgraph_t.h"
26 # include "irprog_t.h"
27 # include "irnode_t.h"
29 # include "irflag_t.h"
34 # include "firmstat.h"
38 * Indicates whether additional data can be registered to graphs.
39 * If set to 1, this is not possible anymore.
41 static int forbid_new_data = 0;
44 * The amount of additional space for custom data to be allocated upon
45 * creating a new graph.
/* Checked by register_additional_graph_data() below; all graphs must share
 * the same layout, so registration is refused once forbid_new_data is set. */
47 static size_t additional_graph_data_size = 0;
/* The graph the implicit construction interface (new_immBlock, new_End, ...)
 * currently operates on. */
49 ir_graph *current_ir_graph;

/** Returns the graph implicitly used by the construction interface. */
50 INLINE ir_graph *get_current_ir_graph(void) {
51 return current_ir_graph;

/** Sets the graph implicitly used by the construction interface. */
53 INLINE void set_current_ir_graph(ir_graph *graph) {
54 current_ir_graph = graph;
/* Global flag: 0 = intraprocedural view, non-zero = interprocedural view. */
58 int __interprocedural_view = false;

/** Returns the current view flag (non-macro wrapper around the inline impl). */
60 int (get_interprocedural_view)(void) {
61 return __get_interprocedural_view();

/** Sets the view flag and redirects the arity/operand accessor function
 *  pointers to the inter- or intraprocedural variants accordingly, so the
 *  hot accessors avoid a per-call branch on the flag. */
64 void (set_interprocedural_view)(int state) {
65 __interprocedural_view = state;
67 /* set function vectors for faster access */
69 __get_irn_arity = __get_irn_inter_arity;
70 __get_irn_n = __get_irn_inter_n;
73 __get_irn_arity = __get_irn_intra_arity;
74 __get_irn_n = __get_irn_intra_n;
/* Ident appended to an entity name to build its frame type's name. */
78 static ident* frame_type_suffix = NULL;

/** One-time module initialization: interns the frame type name suffix.
 *  Must run before the first new_r_ir_graph() call, which mangles with it. */
79 void init_irgraph(void) {
80 frame_type_suffix = new_id_from_str(FRAME_TP_SUFFIX);
85 * Allocate a new ir graph.
86 * This function respects the registered graph data. The only reason for
87 * this function is, that there are two locations, where graphs are
88 * allocated (new_r_ir_graph, new_const_code_irg).
89 * @return Memory for a new graph.
91 ir_graph *alloc_graph(void)
93 size_t size = sizeof(ir_graph) + additional_graph_data_size;
94 char *ptr = xmalloc(size);
/* The registered custom data lives BEFORE the ir_graph in the allocation;
 * the returned pointer is offset past it so callers see a plain ir_graph*. */
97 return (ir_graph *) (ptr + additional_graph_data_size);
100 #if USE_EXPLICIT_PHI_IN_STACK
101 /* really defined in ircons.c */
/* Opaque forward declarations; the stack supports automatic Phi node
 * construction during graph building.
 * NOTE(review): new_Phi_in_stack() is declared with empty parentheses
 * (unspecified parameters) — consider (void) for a proper prototype. */
102 typedef struct Phi_in_stack Phi_in_stack;
103 Phi_in_stack *new_Phi_in_stack();
104 void free_Phi_in_stack(Phi_in_stack *s);
107 /* Allocates a list of nodes:
108 - The start block containing a start node and Proj nodes for its four
109 results (X, M, P, Tuple).
110 - The end block containing an end node. This block is not matured after
111 new_ir_graph as predecessors need to be added to it.
112 - The current block, which is empty and also not matured.
113 Further it allocates several datastructures needed for graph construction
117 new_r_ir_graph (entity *ent, int n_loc)
120 ir_node *first_block;
124 res->kind = k_ir_graph;
126 /* inform statistics here, as blocks will be already built on this graph */
127 stat_new_graph(res, ent);
/* Construction routines below (new_immBlock, new_End, ...) implicitly
 * operate on current_ir_graph, so it must be set before any node is made. */
129 current_ir_graph = res;
131 /*-- initialized for each graph. --*/
132 if (get_opt_precise_exc_context()) {
133 res->n_loc = n_loc + 1 + 1; /* number of local variables that are never
134 dereferenced in this graph plus one for
135 the store plus one for links to fragile
136 operations. n_loc is not the number of
137 parameters to the procedure! */
140 res->n_loc = n_loc + 1; /* number of local variables that are never
141 dereferenced in this graph plus one for
142 the store. This is not the number of parameters
146 res->visited = 0; /* visited flag, for the ir walker */
147 res->block_visited = 0; /* visited flag, for the 'block'-walker */
149 #if USE_EXPLICIT_PHI_IN_STACK
150 res->Phi_in_stack = new_Phi_in_stack(); /* A stack needed for automatic Phi
153 res->kind = k_ir_graph;
154 res->obst = xmalloc (sizeof(*res->obst));
155 obstack_init (res->obst);
156 res->value_table = new_identities (); /* value table for global value
157 numbering for optimizing use in
/* Fresh graph: still under construction, everything pinned, no analysis
 * info (outs/dominance/type info/loop info) computed yet. */
161 res->phase_state = phase_building;
162 res->op_pin_state_pinned = op_pin_state_pinned;
163 res->outs_state = outs_none;
164 res->dom_state = dom_none;
165 res->typeinfo_state = irg_typeinfo_none;
166 res->loopinfo_state = loopinfo_none;
168 /*-- Type information for the procedure of the graph --*/
170 set_entity_irg(ent, res);
172 /*-- a class type so that it can contain "inner" methods as in Pascal. --*/
173 res->frame_type = new_type_class(mangle(get_entity_ident(ent), frame_type_suffix));
175 /* Remove type from type list. Must be treated differently than other types. */
176 remove_irp_type_from_list(res->frame_type);
178 /*-- Nodes needed in every graph --*/
179 res->end_block = new_immBlock();
180 res->end = new_End();
/* In the intraprocedural view the regular/exceptional end aliases the End. */
181 res->end_reg = res->end;
182 res->end_except = res->end;
184 res->start_block = new_immBlock();
185 res->start = new_Start();
/* Canonical Bad and NoMem singletons of this graph, placed in the start block. */
186 res->bad = new_ir_node(NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
187 res->no_mem = new_ir_node(NULL, res, res->start_block, op_NoMem, mode_M, 0, NULL);
189 /* Proj results of start node */
190 projX = new_Proj (res->start, mode_X, pn_Start_X_initial_exec);
191 res->frame = new_Proj (res->start, mode_P_mach, pn_Start_P_frame_base);
192 res->globals = new_Proj (res->start, mode_P_mach, pn_Start_P_globals);
193 res->initial_mem = new_Proj (res->start, mode_M, pn_Start_M);
194 res->args = new_Proj (res->start, mode_T, pn_Start_T_args);
196 res->graph_nr = get_irp_new_node_nr();
198 res->proj_args = NULL;
/* Initial memory state for the construction interface's store slot. */
200 set_store(res->initial_mem);
202 add_immBlock_pred(res->start_block, projX);
204 * The code generation needs it. leave it in now.
205 * Use of this edge is matter of discussion, unresolved. Also possible:
206 * add_immBlock_pred(res->start_block, res->start_block), but invalid typed.
208 mature_immBlock (res->current_block);
210 /*-- Make a block to start with --*/
211 first_block = new_immBlock();
212 add_immBlock_pred (first_block, projX);
/** Convenience wrapper around new_r_ir_graph() that additionally registers
 *  the new graph with the global program representation (irp). */
219 new_ir_graph (entity *ent, int n_loc)
221 ir_graph *res = new_r_ir_graph (ent, n_loc);
222 add_irp_irg(res); /* remember this graph global. */
226 /* Make a rudimentary ir graph for the constant code.
227 Must look like a correct irg, spare everything else. */
228 ir_graph *new_const_code_irg(void) {
234 /* inform statistics here, as blocks will be already built on this graph */
235 stat_new_graph(res, NULL);
237 current_ir_graph = res;
238 res->n_loc = 1; /* Only the memory. */
239 res->visited = 0; /* visited flag, for the ir walker */
240 res->block_visited=0; /* visited flag, for the 'block'-walker */
241 #if USE_EXPLICIT_PHI_IN_STACK
242 res->Phi_in_stack = NULL;
244 res->kind = k_ir_graph;
245 res->obst = xmalloc (sizeof(*res->obst));
246 obstack_init (res->obst);
247 res->phase_state = phase_building;
248 res->op_pin_state_pinned = op_pin_state_pinned;
249 res->value_table = new_identities (); /* value table for global value
250 numbering for optimizing use in
/* No procedure entity, hence no frame type, for the const-code graph. */
253 res->frame_type = NULL;
255 /* -- The end block -- */
256 res->end_block = new_immBlock ();
257 res->end = new_End ();
258 res->end_reg = res->end;
259 res->end_except = res->end;
260 mature_immBlock(get_cur_block()); /* mature the end block */
262 /* -- The start block -- */
263 res->start_block = new_immBlock ();
264 res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
265 res->no_mem = new_ir_node (NULL, res, res->start_block, op_NoMem, mode_M, 0, NULL);
266 res->start = new_Start ();
267 /* Proj results of start node */
268 res->initial_mem = new_Proj (res->start, mode_M, pn_Start_M);
269 projX = new_Proj (res->start, mode_X, pn_Start_X_initial_exec);
270 add_immBlock_pred (res->start_block, projX);
271 mature_immBlock (res->start_block); /* mature the start block */
273 add_immBlock_pred (new_immBlock (), projX);
274 mature_immBlock (get_cur_block()); /* mature the 'body' block for expressions */
276 /* Set the visited flag high enough that the blocks will never be visited. */
/* -1 wraps to the maximum unsigned long value, so no walker's visited
 * counter can ever reach these nodes. */
277 set_irn_visited(get_cur_block(), -1);
278 set_Block_block_visited(get_cur_block(), -1);
279 set_Block_block_visited(res->start_block, -1);
280 set_irn_visited(res->start_block, -1);
281 set_irn_visited(res->bad, -1);
282 set_irn_visited(res->no_mem, -1);
/* Pretend construction finished; this graph is never built upon further. */
284 res->phase_state = phase_high;
288 /* Defined in iropt.c */
289 void del_identities (pset *value_table);

291 /* Frees the passed irgraph.
292 Deallocates all nodes in this graph and the ir_graph structure.
293 Sets the field irgraph in the corresponding entity to NULL.
294 Does not remove the irgraph from the list in irprog (requires
295 inefficient search, call remove_irp_irg by hand).
296 Does not free types, entities or modes that are used only by this
297 graph, nor the entity standing for this graph. */
298 void free_ir_graph (ir_graph *irg) {
300 stat_free_graph(irg);
301 if (irg->outs_state != outs_none) free_outs(irg);
302 if (irg->frame_type) free_type(irg->frame_type);
303 if (irg->value_table) del_identities(irg->value_table);
/* Temporarily lower the entity's peculiarity so clearing its irg pointer
 * passes the entity module's consistency checks, then restore it. */
305 peculiarity pec = get_entity_peculiarity (irg->ent);
306 set_entity_peculiarity (irg->ent, peculiarity_description);
307 set_entity_irg(irg->ent, NULL); /* not set in const code irg */
308 set_entity_peculiarity (irg->ent, pec);
/* Releases every node of the graph at once — they all live on this obstack. */
312 obstack_free(irg->obst,NULL);
314 #if USE_EXPLICIT_PHI_IN_STACK
315 free_Phi_in_stack(irg->Phi_in_stack);
321 /* access routines for all ir_graph attributes:
323 {attr type} get_irg_{attribute name} (ir_graph *irg);
324 void set_irg_{attr name} (ir_graph *irg, {attr type} {attr}); */

/** Checks the kind tag to decide whether thing points to an ir_graph.
 *  Parenthesized name prevents macro expansion of a same-named macro. */
327 (is_ir_graph)(const void *thing) {
328 return __is_ir_graph(thing);

331 /* Outputs a unique number for this node */
334 get_irg_graph_nr(ir_graph *irg) {
337 return irg->graph_nr;
/* Thin exported accessors for the graph's anchor nodes.  The parenthesized
 * function names suppress expansion of the same-named macros; each body
 * forwards to the __-prefixed inline implementation (from irgraph_t.h,
 * included above). */
344 (get_irg_start_block)(const ir_graph *irg) {
345 return __get_irg_start_block(irg);
349 (set_irg_start_block)(ir_graph *irg, ir_node *node) {
350 __set_irg_start_block(irg, node);
354 (get_irg_start)(const ir_graph *irg) {
355 return __get_irg_start(irg);
359 (set_irg_start)(ir_graph *irg, ir_node *node) {
360 __set_irg_start(irg, node);
364 (get_irg_end_block)(const ir_graph *irg) {
365 return __get_irg_end_block(irg);
369 (set_irg_end_block)(ir_graph *irg, ir_node *node) {
370 __set_irg_end_block(irg, node);
374 (get_irg_end)(const ir_graph *irg) {
375 return __get_irg_end(irg);
379 (set_irg_end)(ir_graph *irg, ir_node *node) {
380 __set_irg_end(irg, node);
/* Accessors for the interprocedural end nodes.  The setters accept a plain
 * End as well, since end_reg/end_except alias the End node in the
 * intraprocedural view (see graph construction above). */
384 (get_irg_end_reg)(const ir_graph *irg) {
385 return __get_irg_end_reg(irg);
388 void set_irg_end_reg (ir_graph *irg, ir_node *node) {
389 assert(get_irn_op(node) == op_EndReg || get_irn_op(node) == op_End);
394 (get_irg_end_except)(const ir_graph *irg) {
395 return __get_irg_end_except(irg);
398 void set_irg_end_except (ir_graph *irg, ir_node *node) {
399 assert(get_irn_op(node) == op_EndExcept || get_irn_op(node) == op_End);
400 irg->end_except = node;
/* Further thin accessor wrappers, same pattern as above: parenthesized
 * names to defeat macros, bodies delegating to the inline __ variants. */
404 (get_irg_cstore)(const ir_graph *irg) {
405 return __get_irg_cstore(irg);
409 (set_irg_cstore)(ir_graph *irg, ir_node *node) {
410 __set_irg_cstore(irg, node);
414 (get_irg_frame)(const ir_graph *irg) {
415 return __get_irg_frame(irg);
419 (set_irg_frame)(ir_graph *irg, ir_node *node) {
420 __set_irg_frame(irg, node);
424 (get_irg_globals)(const ir_graph *irg) {
425 return __get_irg_globals(irg);
429 (set_irg_globals)(ir_graph *irg, ir_node *node) {
430 __set_irg_globals(irg, node);
434 (get_irg_initial_mem)(const ir_graph *irg) {
435 return __get_irg_initial_mem(irg);
439 (set_irg_initial_mem)(ir_graph *irg, ir_node *node) {
440 __set_irg_initial_mem(irg, node);
444 (get_irg_args)(const ir_graph *irg) {
445 return __get_irg_args(irg);
449 (set_irg_args)(ir_graph *irg, ir_node *node) {
450 __set_irg_args(irg, node);
454 (get_irg_proj_args) (const ir_graph *irg) {
455 return __get_irg_proj_args (irg);
459 (set_irg_proj_args) (ir_graph *irg, ir_node **nodes) {
460 __set_irg_proj_args (irg, nodes);
464 (get_irg_bad)(const ir_graph *irg) {
465 return __get_irg_bad(irg);
469 (set_irg_bad)(ir_graph *irg, ir_node *node) {
470 __set_irg_bad(irg, node);
474 (get_irg_no_mem)(const ir_graph *irg) {
475 return __get_irg_no_mem(irg);
479 (set_irg_no_mem)(ir_graph *irg, ir_node *node) {
480 __set_irg_no_mem(irg, node);
484 (get_irg_current_block)(const ir_graph *irg) {
485 return __get_irg_current_block(irg);
489 (set_irg_current_block)(ir_graph *irg, ir_node *node) {
490 __set_irg_current_block(irg, node);
494 (get_irg_entity)(const ir_graph *irg) {
495 return __get_irg_entity(irg);
499 (set_irg_entity)(ir_graph *irg, entity *ent) {
500 __set_irg_entity(irg, ent);
504 (get_irg_frame_type)(const ir_graph *irg) {
505 return __get_irg_frame_type(irg);
509 (set_irg_frame_type)(ir_graph *irg, type *ftp) {
510 __set_irg_frame_type(irg, ftp);
514 /* To test for a frame type */
/* Returns true iff ftp is the frame type of some graph in the program.
 * Note: linear scan over all graphs — O(number of graphs) per call. */
516 is_frame_type(const type *ftp) {
518 if (is_class_type(ftp)) {
519 for (i = 0; i < get_irp_n_irgs(); i++) {
520 const type *frame_tp = get_irg_frame_type(get_irp_irg(i));
521 if (ftp == frame_tp) return true;
/* Number of user-visible local variable slots.  Mirrors the n_loc setup in
 * new_r_ir_graph(): the stored n_loc includes one extra slot for the store,
 * plus one more for fragile-operation links when precise exception context
 * is enabled, so the getter subtracts what the setter adds. */
528 get_irg_n_locs (ir_graph *irg)
530 if (get_opt_precise_exc_context())
531 return irg->n_loc - 1 - 1;
533 return irg->n_loc - 1;
537 set_irg_n_loc (ir_graph *irg, int n_loc)
539 if (get_opt_precise_exc_context())
540 irg->n_loc = n_loc + 1 + 1;
542 irg->n_loc = n_loc + 1;
547 /* Returns the obstack associated with the graph. */
549 (get_irg_obstack)(const ir_graph *irg) {
550 return __get_irg_obstack(irg);
554 * Returns true if the node n is allocated on the storage of graph irg.
556 * Implementation is GLIBC specific as it uses the internal _obstack_chunk implementation.
558 int node_is_in_irgs_storage(ir_graph *irg, ir_node *n)
560 struct _obstack_chunk *p;
563 * checks whether the ir_node pointer is on the obstack.
564 * A more sophisticated check would test the "whole" ir_node
/* Walk the obstack's chunk list; n belongs to irg iff its address falls
 * inside some chunk's [contents, limit) payload range. */
566 for (p = irg->obst->chunk; p; p = p->prev) {
567 if (((char *)p->contents <= (char *)n) && ((char *)n < (char *)p->limit))
/* Accessors for the various analysis-state flags of a graph; same thin
 * wrapper pattern as the node accessors above. */
575 (get_irg_phase_state)(const ir_graph *irg) {
576 return __get_irg_phase_state(irg);
580 (set_irg_phase_low)(ir_graph *irg) {
581 __set_irg_phase_low(irg);
585 (get_irg_pinned)(const ir_graph *irg) {
586 return __get_irg_pinned(irg);
590 (get_irg_outs_state)(const ir_graph *irg) {
591 return __get_irg_outs_state(irg);
595 (set_irg_outs_inconsistent)(ir_graph *irg) {
596 __set_irg_outs_inconsistent(irg);
600 (get_irg_dom_state)(const ir_graph *irg) {
601 return __get_irg_dom_state(irg);
605 (set_irg_dom_inconsistent)(ir_graph *irg) {
606 __set_irg_dom_inconsistent(irg);
610 (get_irg_loopinfo_state)(const ir_graph *irg) {
611 return __get_irg_loopinfo_state(irg);
615 (set_irg_loopinfo_state)(ir_graph *irg, irg_loopinfo_state s) {
616 __set_irg_loopinfo_state(irg, s);

/* Downgrades whichever "consistent" loop-info state the graph holds to the
 * matching "inconsistent" state; states already inconsistent are untouched. */
620 set_irg_loopinfo_inconsistent(ir_graph *irg) {
621 if (irg->loopinfo_state == loopinfo_ip_consistent)
622 irg->loopinfo_state = loopinfo_ip_inconsistent;
624 else if (irg->loopinfo_state == loopinfo_consistent)
625 irg->loopinfo_state = loopinfo_inconsistent;
627 else if (irg->loopinfo_state == loopinfo_cf_ip_consistent)
628 irg->loopinfo_state = loopinfo_cf_ip_inconsistent;
630 else if (irg->loopinfo_state == loopinfo_cf_consistent)
631 irg->loopinfo_state = loopinfo_cf_inconsistent;

635 (set_irg_pinned)(ir_graph *irg, op_pin_state p) {
636 __set_irg_pinned(irg, p);
639 irg_callee_info_state
640 (get_irg_callee_info_state)(const ir_graph *irg) {
641 return __get_irg_callee_info_state(irg);
645 (set_irg_callee_info_state)(ir_graph *irg, irg_callee_info_state s) {
646 __set_irg_callee_info_state(irg, s);
650 (get_irg_inline_property)(const ir_graph *irg) {
651 return __get_irg_inline_property(irg);
655 (set_irg_inline_property)(ir_graph *irg, irg_inline_property s) {
656 __set_irg_inline_property(irg, s);
660 (set_irg_link)(ir_graph *irg, void *thing) {
661 __set_irg_link(irg, thing);
665 (get_irg_link)(const ir_graph *irg) {
666 return __get_irg_link(irg);
669 /** maximum visited flag content of all ir_graph visited fields. */
670 static unsigned long max_irg_visited = 0;

673 (get_irg_visited)(const ir_graph *irg) {
674 return __get_irg_visited(irg);

/* Sets a graph's visited counter, maintaining the invariant that
 * max_irg_visited is >= every graph's visited value. */
678 set_irg_visited (ir_graph *irg, unsigned long visited)
680 irg->visited = visited;
681 if (irg->visited > max_irg_visited) {
682 max_irg_visited = irg->visited;

/* Bumps a graph's visited counter (fresh epoch for a walker), again
 * keeping max_irg_visited current. */
687 inc_irg_visited (ir_graph *irg)
689 if (++irg->visited > max_irg_visited) {
690 max_irg_visited = irg->visited;

/* Returns the global maximum; in debug builds re-checks the invariant
 * against every graph first. */
695 get_max_irg_visited(void)
699 for(i = 0; i < get_irp_n_irgs(); i++)
700 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
702 return max_irg_visited;

/* NOTE(review): parameter is int while max_irg_visited is unsigned long —
 * a negative val would wrap to a huge value; consider unsigned long. */
705 void set_max_irg_visited(int val) {
706 max_irg_visited = val;

/* Advances and returns the global maximum, asserting the invariant. */
710 inc_max_irg_visited(void)
714 for(i = 0; i < get_irp_n_irgs(); i++)
715 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
718 return max_irg_visited;
/* Thin wrappers for the per-graph block-walker visited counter. */
722 (get_irg_block_visited)(const ir_graph *irg) {
723 return __get_irg_block_visited(irg);
727 (set_irg_block_visited)(ir_graph *irg, unsigned long visited) {
728 __set_irg_block_visited(irg, visited);
732 (inc_irg_block_visited)(ir_graph *irg) {
733 __inc_irg_block_visited(irg);
738 * walker Start->End: places Proj nodes into the same block
739 * as its predecessors
744 static void normalize_proj_walker(ir_node *n, void *env)
747 ir_node *pred = get_Proj_pred(n);
748 ir_node *block = get_nodes_block(pred);
750 set_nodes_block(n, block);

754 /* put the proj's into the same block as its predecessors */
/* Applies normalize_proj_walker to every node, then flags the out edges as
 * inconsistent since moving nodes between blocks invalidates them. */
756 void normalize_proj_nodes(ir_graph *irg)
757 irg_walk_graph(irg, NULL, normalize_proj_walker, NULL);
758 set_irg_outs_inconsistent(irg);
/* Reserves size extra bytes of per-graph custom data (allocated before the
 * ir_graph by alloc_graph()) and returns the new total extra size, which
 * callers can use as an offset to locate their data.  Must be called before
 * any graph is allocated.
 * NOTE(review): the assert message says "node data" but this registers
 * GRAPH data — message looks copy-pasted from the irnode variant. */
761 size_t register_additional_graph_data(size_t size)
763 assert(!forbid_new_data && "Too late to register additional node data");
768 return additional_graph_data_size += size;