3 * File name: ir/ir/irgraph.c
4 * Purpose: Entry point to the representation of procedure code.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
20 # include "irgraph_t.h"
21 # include "irprog_t.h"
23 # include "irflag_t.h"
28 # include "firmstat.h"
/* The graph that node construction and many analyses implicitly operate on.
   Kept as a module global; set_current_ir_graph() must select a graph before
   nodes are built for it. */
30 ir_graph *current_ir_graph;
/* Returns the graph currently being worked on. */
31 INLINE ir_graph *get_current_ir_graph(void) {
32 return current_ir_graph;
/* Makes graph the current graph for subsequent node construction. */
34 INLINE void set_current_ir_graph(ir_graph *graph) {
35 current_ir_graph = graph;
/* Global flag: when true, walkers and analyses use the interprocedural view
   (crossing graph boundaries); default is the intraprocedural view. */
39 bool interprocedural_view = false;
/* Returns the current setting of the interprocedural-view flag. */
40 INLINE bool get_interprocedural_view(void) {
41 return interprocedural_view;
/* Selects interprocedural (true) or intraprocedural (false) view. */
43 INLINE void set_interprocedural_view(bool state) {
44 interprocedural_view = state;
/* Identifier suffix appended to an entity's name to form the name of its
   frame type; created once in init_irgraph(). */
47 static ident* frame_type_suffix = NULL;
/* One-time module initialization; must run before the first new_ir_graph(). */
48 void init_irgraph(void) {
49 frame_type_suffix = id_from_str(FRAME_TP_SUFFIX, strlen(FRAME_TP_SUFFIX));
52 #if USE_EXPLICIT_PHI_IN_STACK
53 /* really defined in ircons.c */
54 typedef struct Phi_in_stack Phi_in_stack;
/* Allocates a fresh Phi-in stack (implementation in ircons.c). */
55 Phi_in_stack *new_Phi_in_stack();
/* Releases a stack obtained from new_Phi_in_stack(). */
56 void free_Phi_in_stack(Phi_in_stack *s);
59 /* Allocates a list of nodes:
60 - The start block containing a start node and Proj nodes for its four
61 results (X, M, P, Tuple).
62 - The end block containing an end node. This block is not matured after
63 new_ir_graph as predecessors need to be added to it.
64 - The current block, which is empty and also not matured.
65 Further it allocates several datastructures needed for graph construction
69 new_ir_graph (entity *ent, int n_loc)
/* NOTE(review): malloc result is not checked; a failing allocation would
   crash in the memset below. */
75 res = (ir_graph *) malloc (sizeof (ir_graph));
76 memset(res, 0, sizeof (ir_graph));
79 /* inform statistics here, as blocks will be already build on this graph */
80 stat_new_graph(res, ent);
/* The new graph becomes the current one: the node constructors below
   implicitly build into current_ir_graph. */
82 current_ir_graph = res;
83 add_irp_irg(res); /* remember this graph global. */
85 /*-- initialized for each graph. --*/
/* With precise exception contexts one extra local slot is reserved for
   links to fragile operations, in addition to the slot for the store. */
86 if (get_opt_precise_exc_context()) {
87 res->n_loc = n_loc + 1 + 1; /* number of local variables that are never
88 dereferenced in this graph plus one for
89 the store plus one for links to fragile
90 operations. n_loc is not the number of
91 parameters to the procedure! */
94 res->n_loc = n_loc + 1; /* number of local variables that are never
95 dereferenced in this graph plus one for
96 the store. This is not the number of parameters
100 res->visited = 0; /* visited flag, for the ir walker */
101 res->block_visited=0; /* visited flag, for the 'block'-walker */
103 #if USE_EXPLICIT_PHI_IN_STACK
104 res->Phi_in_stack = new_Phi_in_stack(); /* A stack needed for automatic Phi
107 res->kind = k_ir_graph;
/* Each graph owns an obstack; its nodes live there and are released
   together in free_ir_graph(). */
108 res->obst = (struct obstack *) xmalloc (sizeof (struct obstack));
109 obstack_init (res->obst);
110 res->value_table = new_identities (); /* value table for global value
111 numbering for optimizing use in
/* Analysis-state flags start out invalid: construction is in progress. */
115 res->phase_state = phase_building;
116 res->pinned = pinned;
117 res->outs_state = no_outs;
118 res->dom_state = no_dom;
119 res->typeinfo_state = irg_typeinfo_none;
120 res->loopinfo_state = loopinfo_none;
122 /*-- Type information for the procedure of the graph --*/
124 set_entity_irg(ent, res);
126 /*-- contain "inner" methods as in Pascal. --*/
/* Frame type is a class named <entity-name><FRAME_TP_SUFFIX>. */
127 res->frame_type = new_type_class(mangle(get_entity_ident(ent), frame_type_suffix));
129 /* Remove type from type list. Must be treated differently than other types. */
130 remove_irp_type_from_list(res->frame_type);
132 /*-- Nodes needed in every graph --*/
/* End block/node are built first; the end block stays immature so that
   predecessors (e.g. Return jumps) can still be added. */
133 res->end_block = new_immBlock();
134 res->end = new_End();
135 res->end_reg = res->end;
136 res->end_except = res->end;
138 res->start_block = new_immBlock();
139 res->start = new_Start();
140 res->bad = new_ir_node(NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
141 /* res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL); */
143 /* Proj results of start node */
144 projX = new_Proj (res->start, mode_X, pns_initial_exec);
145 res->frame = new_Proj (res->start, mode_P_mach, pns_frame_base);
146 res->globals = new_Proj (res->start, mode_P_mach, pns_globals);
147 res->initial_mem = new_Proj (res->start, mode_M, pns_global_store);
148 res->args = new_Proj (res->start, mode_T, pns_args);
150 res->graph_nr = get_irp_new_node_nr();
/* The initial memory state becomes the current store for construction. */
153 set_store(res->initial_mem);
155 add_in_edge(res->start_block, projX);
157 * The code generation needs it. leave it in now.
158 * Use of this edge is matter of discussion, unresolved. Also possible:
159 * add_in_edge(res->start_block, res->start_block), but invalid typed.
161 mature_block (res->current_block);
163 /*-- Make a block to start with --*/
164 first_block = new_immBlock();
165 add_in_edge (first_block, projX);
171 /* Make a rudimentary ir graph for the constant code.
172 Must look like a correct irg, spare everything else. */
173 ir_graph *new_const_code_irg(void) {
/* NOTE(review): malloc result is not checked before the memset. */
177 res = (ir_graph *) malloc (sizeof(*res));
178 memset(res, 0, sizeof(*res));
180 /* inform statistics here, as blocks will be already build on this graph */
181 stat_new_graph(res, NULL);
/* No entity: this graph only hosts constant expressions. */
183 current_ir_graph = res;
184 res->n_loc = 1; /* Only the memory. */
185 res->visited = 0; /* visited flag, for the ir walker */
186 res->block_visited=0; /* visited flag, for the 'block'-walker */
187 #if USE_EXPLICIT_PHI_IN_STACK
188 res->Phi_in_stack = NULL;
190 res->kind = k_ir_graph;
191 res->obst = (struct obstack *) xmalloc (sizeof (struct obstack));
192 obstack_init (res->obst);
193 res->phase_state = phase_building;
194 res->pinned = pinned;
195 res->value_table = new_identities (); /* value table for global value
196 numbering for optimizing use in
199 res->frame_type = NULL;
/* Minimal skeleton: start block, end block, End/Start/Bad nodes. */
200 res->start_block = new_immBlock ();
201 res->end_block = new_immBlock ();
202 res->end = new_End ();
203 res->end_reg = res->end;
204 res->end_except = res->end;
205 mature_block(get_cur_block());
206 res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
207 /* res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL); */
208 res->start = new_Start ();
210 /* Proj results of start node */
211 projX = new_Proj (res->start, mode_X, pns_initial_exec);
212 add_in_edge(res->start_block, projX);
213 mature_block (res->current_block);
214 add_in_edge (new_immBlock (), projX);
215 mature_block(get_cur_block());
216 /* Set the visited flag high enough that the block will never be visited. */
/* -1 wraps to the maximum unsigned value, so walkers always consider
   these nodes already visited. */
217 set_irn_visited(get_cur_block(), -1);
218 set_Block_block_visited(get_cur_block(), -1);
219 set_Block_block_visited(res->start_block, -1);
220 set_irn_visited(res->start_block, -1);
221 set_irn_visited(res->bad, -1);
225 /* Defined in iropt.c */
226 void del_identities (pset *value_table);
228 /* Frees the passed irgraph.
229 Deallocates all nodes in this graph and the ir_graph structure.
230 Sets the field irgraph in the corresponding entity to NULL.
231 Does not remove the irgraph from the list in irprog (requires
232 inefficient search, call remove_irp_irg by hand).
233 Does not free types, entities or modes that are used only by this
234 graph, nor the entity standing for this graph. */
235 void free_ir_graph (ir_graph *irg) {
236 stat_free_graph(irg);
/* Release analysis data before the node storage. */
237 if (irg->outs_state != no_outs) free_outs(irg);
238 if (irg->frame_type) free_type(irg->frame_type);
239 if (irg->value_table) del_identities(irg->value_table);
240 if (irg->ent) set_entity_irg(irg->ent, NULL); /* not set in const code irg */
/* Frees every node of the graph in one sweep. */
242 obstack_free(irg->obst,NULL);
244 #if USE_EXPLICIT_PHI_IN_STACK
245 free_Phi_in_stack(irg->Phi_in_stack);
251 /* access routines for all ir_graph attributes:
253 {attr type} get_irg_{attribute name} (ir_graph *irg);
254 void set_irg_{attr name} (ir_graph *irg, {attr type} {attr}); */
/* Returns nonzero if thing points to an ir_graph (checks the kind tag).
   Parenthesized name keeps the function callable despite a same-named macro. */
257 (is_ir_graph)(void *thing) {
258 return __is_ir_graph(thing);
261 /* Outputs a unique number for this node */
/* Returns the graph's unique number, assigned at construction time. */
264 get_irg_graph_nr(ir_graph *irg) {
267 return irg->graph_nr;
/* The following accessors are thin wrappers around the __-prefixed
   implementations from irgraph_t.h; the parenthesized names keep the
   functions callable even though same-named macros exist. */
274 (get_irg_start_block)(ir_graph *irg) {
275 return __get_irg_start_block(irg);
279 (set_irg_start_block)(ir_graph *irg, ir_node *node) {
280 __set_irg_start_block(irg, node);
284 (get_irg_start)(ir_graph *irg) {
285 return __get_irg_start(irg);
289 (set_irg_start)(ir_graph *irg, ir_node *node) {
290 __set_irg_start(irg, node);
294 (get_irg_end_block)(ir_graph *irg) {
295 return __get_irg_end_block(irg);
299 (set_irg_end_block)(ir_graph *irg, ir_node *node) {
300 __set_irg_end_block(irg, node);
304 (get_irg_end)(ir_graph *irg) {
305 return __get_irg_end(irg);
309 (set_irg_end)(ir_graph *irg, ir_node *node) {
310 __set_irg_end(irg, node);
314 (get_irg_end_reg)(ir_graph *irg) {
315 return __get_irg_end_reg(irg);
/* end_reg may only be an EndReg node or the regular End node. */
318 void set_irg_end_reg (ir_graph *irg, ir_node *node) {
319 assert(get_irn_op(node) == op_EndReg || get_irn_op(node) == op_End);
324 (get_irg_end_except)(ir_graph *irg) {
325 return __get_irg_end_except(irg);
/* end_except may only be an EndExcept node or the regular End node. */
328 void set_irg_end_except (ir_graph *irg, ir_node *node) {
329 assert(get_irn_op(node) == op_EndExcept || get_irn_op(node) == op_End);
330 irg->end_except = node;
334 (get_irg_cstore)(ir_graph *irg) {
335 return __get_irg_cstore(irg);
339 (set_irg_cstore)(ir_graph *irg, ir_node *node) {
340 __set_irg_cstore(irg, node);
344 (get_irg_frame)(ir_graph *irg) {
345 return __get_irg_frame(irg);
349 (set_irg_frame)(ir_graph *irg, ir_node *node) {
350 __set_irg_frame(irg, node);
354 (get_irg_globals)(ir_graph *irg) {
355 return __get_irg_globals(irg);
359 (set_irg_globals)(ir_graph *irg, ir_node *node) {
360 __set_irg_globals(irg, node);
364 (get_irg_initial_mem)(ir_graph *irg)
366 return __get_irg_initial_mem(irg);
370 (set_irg_initial_mem)(ir_graph *irg, ir_node *node) {
371 __set_irg_initial_mem(irg, node);
375 (get_irg_args)(ir_graph *irg) {
376 return __get_irg_args(irg);
380 (set_irg_args)(ir_graph *irg, ir_node *node) {
381 __set_irg_args(irg, node);
385 (get_irg_bad)(ir_graph *irg) {
386 return __get_irg_bad(irg);
390 (set_irg_bad)(ir_graph *irg, ir_node *node) {
391 __set_irg_bad(irg, node);
394 /* GL removed: we need unknown with mode for analyses.
396 get_irg_unknown (ir_graph *irg)
402 set_irg_unknown (ir_graph *irg, ir_node *node)
/* Macro-backed accessors for the block currently under construction,
   the graph's entity, and its frame type (see irgraph_t.h). */
409 (get_irg_current_block)(ir_graph *irg) {
410 return __get_irg_current_block(irg);
414 (set_irg_current_block)(ir_graph *irg, ir_node *node) {
415 __set_irg_current_block(irg, node);
419 (get_irg_ent)(ir_graph *irg) {
420 return __get_irg_ent(irg);
424 (set_irg_ent)(ir_graph *irg, entity *ent) {
425 __set_irg_ent(irg, ent);
429 (get_irg_frame_type)(ir_graph *irg) {
430 return __get_irg_frame_type(irg);
434 (set_irg_frame_type)(ir_graph *irg, type *ftp) {
435 __set_irg_frame_type(irg, ftp);
439 /* To test for a frame type */
/* Returns true iff ftp is the frame type of some graph in the program.
   Linear scan over all graphs; only class types can be frame types. */
441 is_frame_type(type *ftp) {
443 if (is_class_type(ftp)) {
444 for (i = 0; i < get_irp_n_irgs(); i++) {
445 type *frame_tp = get_irg_frame_type(get_irp_irg(i));
446 if (ftp == frame_tp) return true;
/* Returns the caller-visible number of local variables, undoing the extra
   slots (store, and fragile-op links with precise exception contexts) that
   new_ir_graph added to n_loc. */
453 get_irg_n_locs (ir_graph *irg)
455 if (get_opt_precise_exc_context())
456 return irg->n_loc - 1 - 1;
458 return irg->n_loc - 1;
/* Sets the number of locals, re-adding the same extra slots; must mirror
   the computation in new_ir_graph. */
462 set_irg_n_loc (ir_graph *irg, int n_loc)
464 if (get_opt_precise_exc_context())
465 irg->n_loc = n_loc + 1 + 1;
467 irg->n_loc = n_loc + 1;
472 /* Returns the obstack associated with the graph. */
474 (get_irg_obstack)(ir_graph *irg) {
475 return __get_irg_obstack(irg);
479 * Returns true if the node n is allocated on the storage of graph irg.
481 * Implementation is GLIBC specific as it uses the internal _obstack_chunk implementation.
483 int node_is_in_irgs_storage(ir_graph *irg, ir_node *n)
485 struct _obstack_chunk *p;
488 * checks whether the ir_node pointer is on the obstack.
489 * A more sophisticated check would test the "whole" ir_node
/* Walk the chunk list; n belongs to the graph if it lies inside any
   chunk's [contents, limit) range. */
491 for (p = irg->obst->chunk; p; p = p->prev) {
492 if (((char *)p->contents <= (char *)n) && ((char *)n < (char *)p->limit))
/* Macro-backed accessors for the graph's analysis-state flags
   (phase, pinned, outs, dominance, loop info); see irgraph_t.h. */
500 (get_irg_phase_state)(ir_graph *irg) {
501 return __get_irg_phase_state(irg);
505 (set_irg_phase_low)(ir_graph *irg) {
506 __set_irg_phase_low(irg);
510 (get_irg_pinned)(ir_graph *irg) {
511 return __get_irg_pinned(irg);
515 (get_irg_outs_state)(ir_graph *irg) {
516 return __get_irg_outs_state(irg);
520 (set_irg_outs_inconsistent)(ir_graph *irg) {
521 __set_irg_outs_inconsistent(irg);
525 (get_irg_dom_state)(ir_graph *irg) {
526 return __get_irg_dom_state(irg);
530 (set_irg_dom_inconsistent)(ir_graph *irg) {
531 __set_irg_dom_inconsistent(irg);
535 (get_irg_loopinfo_state)(ir_graph *irg) {
536 return __get_irg_loopinfo_state(irg);
540 (set_irg_loopinfo_state)(ir_graph *irg, irg_loopinfo_state s) {
541 __set_irg_loopinfo_state(irg, s);
/* Degrades the loopinfo state from each consistent value to its matching
   inconsistent value, separately for the intra-/interprocedural and the
   control-flow-only variants. Already-inconsistent states stay unchanged. */
545 set_irg_loopinfo_inconsistent(ir_graph *irg) {
546 if (irg->loopinfo_state == loopinfo_ip_consistent)
547 irg->loopinfo_state = loopinfo_ip_inconsistent;
549 else if (irg->loopinfo_state == loopinfo_consistent)
550 irg->loopinfo_state = loopinfo_inconsistent;
552 else if (irg->loopinfo_state == loopinfo_cf_ip_consistent)
553 irg->loopinfo_state = loopinfo_cf_ip_inconsistent;
555 else if (irg->loopinfo_state == loopinfo_cf_consistent)
556 irg->loopinfo_state = loopinfo_cf_inconsistent;
/* Macro-backed accessors for pinned state, callee information state,
   inline property and the generic link pointer; see irgraph_t.h. */
560 (set_irg_pinned)(ir_graph *irg, op_pinned p) {
561 __set_irg_pinned(irg, p);
564 irg_callee_info_state
565 (get_irg_callee_info_state)(ir_graph *irg) {
566 return __get_irg_callee_info_state(irg);
570 (set_irg_callee_info_state)(ir_graph *irg, irg_callee_info_state s) {
571 __set_irg_callee_info_state(irg, s);
575 (get_irg_inline_property)(ir_graph *irg) {
576 return __get_irg_inline_property(irg);
580 (set_irg_inline_property)(ir_graph *irg, irg_inline_property s) {
581 __set_irg_inline_property(irg, s);
585 (set_irg_link)(ir_graph *irg, void *thing) {
586 __set_irg_link(irg, thing);
590 (get_irg_link)(ir_graph *irg) {
591 return __get_irg_link(irg);
594 /* maximum visited flag content of all ir_graph visited fields. */
595 static int max_irg_visited = 0;
598 (get_irg_visited)(ir_graph *irg) {
599 return __get_irg_visited(irg);
/* Sets the graph's visited counter and keeps the global maximum in sync. */
603 set_irg_visited (ir_graph *irg, unsigned long visited)
605 irg->visited = visited;
606 if (irg->visited > max_irg_visited) {
607 max_irg_visited = irg->visited;
/* Increments the graph's visited counter, tracking the global maximum. */
612 inc_irg_visited (ir_graph *irg)
614 if (++irg->visited > max_irg_visited) {
615 max_irg_visited = irg->visited;
/* Returns the maximum visited value over all graphs; the asserts verify
   that the cached maximum really is an upper bound. */
620 get_max_irg_visited(void)
624 for(i = 0; i < get_irp_n_irgs(); i++)
625 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
627 return max_irg_visited;
/* Overrides the cached maximum; caller must guarantee val is an upper bound
   for all graphs' visited counters. */
630 void set_max_irg_visited(int val) {
631 max_irg_visited = val;
/* NOTE(review): named 'inc' but the increment itself is not visible in this
   excerpt — presumably bumps max_irg_visited before returning; confirm
   against the full source. */
635 inc_max_irg_visited(void)
639 for(i = 0; i < get_irp_n_irgs(); i++)
640 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
643 return max_irg_visited;
/* Macro-backed accessors for the per-graph block_visited counter used by
   the block walker; see irgraph_t.h. */
647 (get_irg_block_visited)(ir_graph *irg) {
648 return __get_irg_block_visited(irg);
652 (set_irg_block_visited)(ir_graph *irg, unsigned long visited) {
653 __set_irg_block_visited(irg, visited);
657 (inc_irg_block_visited)(ir_graph *irg) {
658 __inc_irg_block_visited(irg);