 * File name:   ir/ir/irgraph.c
 * Purpose:     Entry point to the representation of procedure code.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
25 # include "irgraph_t.h"
26 # include "irprog_t.h"
27 # include "irnode_t.h"
29 # include "irflag_t.h"
34 # include "firmstat.h"
/**
 * Indicates whether additional data can be registered to graphs.
 * If set to 1, this is no longer possible (a graph has already been
 * allocated, so the layout of the extra data area is frozen).
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new graph.  The extra bytes live in front of the ir_graph
 * (see alloc_graph()).
 */
static size_t additional_graph_data_size = 0;
49 ir_graph *current_ir_graph;
50 INLINE ir_graph *get_current_ir_graph(void) {
51 return current_ir_graph;
53 INLINE void set_current_ir_graph(ir_graph *graph) {
54 current_ir_graph = graph;
58 int __interprocedural_view = false;
60 int (get_interprocedural_view)(void) {
61 return __get_interprocedural_view();
64 void (set_interprocedural_view)(int state) {
65 __interprocedural_view = state;
67 /* set function vectors for faster access */
69 __get_irn_arity = __get_irn_inter_arity;
70 __get_irn_n = __get_irn_inter_n;
73 __get_irn_arity = __get_irn_intra_arity;
74 __get_irn_n = __get_irn_intra_n;
78 static ident* frame_type_suffix = NULL;
79 void init_irgraph(void) {
80 frame_type_suffix = new_id_from_str(FRAME_TP_SUFFIX);
85 * Allocate a new ir graph.
86 * This function respects the registered graph data. The only reason for
87 * this function is, that there are two locations, where graphs are
88 * allocated (new_r_ir_graph, new_const_code_irg).
89 * @return Memory for a new graph.
91 ir_graph *alloc_graph(void)
93 size_t size = sizeof(ir_graph) + additional_graph_data_size;
94 char *ptr = xmalloc(size);
97 return (ir_graph *) (ptr + additional_graph_data_size);
#if USE_EXPLICIT_PHI_IN_STACK
/* really defined in ircons.c */
typedef struct Phi_in_stack Phi_in_stack;
Phi_in_stack *new_Phi_in_stack(void);
void free_Phi_in_stack(Phi_in_stack *s);
#endif
107 /* Allocates a list of nodes:
108 - The start block containing a start node and Proj nodes for it's four
109 results (X, M, P, Tuple).
110 - The end block containing an end node. This block is not matured after
111 new_ir_graph as predecessors need to be added to it.
112 - The current block, which is empty and also not matured.
113 Further it allocates several datastructures needed for graph construction
117 new_r_ir_graph (entity *ent, int n_loc)
120 ir_node *first_block;
124 res->kind = k_ir_graph;
126 /* inform statistics here, as blocks will be already build on this graph */
127 stat_new_graph(res, ent);
129 current_ir_graph = res;
131 /*-- initialized for each graph. --*/
132 if (get_opt_precise_exc_context()) {
133 res->n_loc = n_loc + 1 + 1; /* number of local variables that are never
134 dereferenced in this graph plus one for
135 the store plus one for links to fragile
136 operations. n_loc is not the number of
137 parameters to the procedure! */
140 res->n_loc = n_loc + 1; /* number of local variables that are never
141 dereferenced in this graph plus one for
142 the store. This is not the number of parameters
146 res->visited = 0; /* visited flag, for the ir walker */
147 res->block_visited = 0; /* visited flag, for the 'block'-walker */
149 #if USE_EXPLICIT_PHI_IN_STACK
150 res->Phi_in_stack = new_Phi_in_stack(); /* A stack needed for automatic Phi
153 res->kind = k_ir_graph;
154 res->obst = (struct obstack *) xmalloc (sizeof (struct obstack));
155 obstack_init (res->obst);
156 res->value_table = new_identities (); /* value table for global value
157 numbering for optimizing use in
161 res->phase_state = phase_building;
162 res->op_pin_state_pinned = op_pin_state_pinned;
163 res->outs_state = outs_none;
164 res->dom_state = dom_none;
165 res->typeinfo_state = irg_typeinfo_none;
166 res->loopinfo_state = loopinfo_none;
168 /*-- Type information for the procedure of the graph --*/
170 set_entity_irg(ent, res);
172 /*-- a class type so that it can contain "inner" methods as in Pascal. --*/
173 res->frame_type = new_type_class(mangle(get_entity_ident(ent), frame_type_suffix));
175 /* Remove type from type list. Must be treated differently than other types. */
176 remove_irp_type_from_list(res->frame_type);
178 /*-- Nodes needed in every graph --*/
179 res->end_block = new_immBlock();
180 res->end = new_End();
181 res->end_reg = res->end;
182 res->end_except = res->end;
184 res->start_block = new_immBlock();
185 res->start = new_Start();
186 res->bad = new_ir_node(NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
187 res->no_mem = new_ir_node(NULL, res, res->start_block, op_NoMem, mode_M, 0, NULL);
189 /* Proj results of start node */
190 projX = new_Proj (res->start, mode_X, pn_Start_X_initial_exec);
191 res->frame = new_Proj (res->start, mode_P_mach, pn_Start_P_frame_base);
192 res->globals = new_Proj (res->start, mode_P_mach, pn_Start_P_globals);
193 res->initial_mem = new_Proj (res->start, mode_M, pn_Start_M);
194 res->args = new_Proj (res->start, mode_T, pn_Start_T_args);
196 res->graph_nr = get_irp_new_node_nr();
199 set_store(res->initial_mem);
201 add_immBlock_pred(res->start_block, projX);
203 * The code generation needs it. leave it in now.
204 * Use of this edge is matter of discussion, unresolved. Also possible:
205 * add_immBlock_pred(res->start_block, res->start_block), but invalid typed.
207 mature_immBlock (res->current_block);
209 /*-- Make a block to start with --*/
210 first_block = new_immBlock();
211 add_immBlock_pred (first_block, projX);
218 new_ir_graph (entity *ent, int n_loc)
220 ir_graph *res = new_r_ir_graph (ent, n_loc);
221 add_irp_irg(res); /* remember this graph global. */
225 /* Make a rudimentary ir graph for the constant code.
226 Must look like a correct irg, spare everything else. */
227 ir_graph *new_const_code_irg(void) {
233 /* inform statistics here, as blocks will be already build on this graph */
234 stat_new_graph(res, NULL);
236 current_ir_graph = res;
237 res->n_loc = 1; /* Only the memory. */
238 res->visited = 0; /* visited flag, for the ir walker */
239 res->block_visited=0; /* visited flag, for the 'block'-walker */
240 #if USE_EXPLICIT_PHI_IN_STACK
241 res->Phi_in_stack = NULL;
243 res->kind = k_ir_graph;
244 res->obst = (struct obstack *) xmalloc (sizeof (struct obstack));
245 obstack_init (res->obst);
246 res->phase_state = phase_building;
247 res->op_pin_state_pinned = op_pin_state_pinned;
248 res->value_table = new_identities (); /* value table for global value
249 numbering for optimizing use in
252 res->frame_type = NULL;
254 /* -- The end block -- */
255 res->end_block = new_immBlock ();
256 res->end = new_End ();
257 res->end_reg = res->end;
258 res->end_except = res->end;
259 mature_immBlock(get_cur_block()); /* mature the end block */
261 /* -- The start block -- */
262 res->start_block = new_immBlock ();
263 res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
264 res->no_mem = new_ir_node (NULL, res, res->start_block, op_NoMem, mode_M, 0, NULL);
265 res->start = new_Start ();
266 /* Proj results of start node */
267 res->initial_mem = new_Proj (res->start, mode_M, pn_Start_M);
268 projX = new_Proj (res->start, mode_X, pn_Start_X_initial_exec);
269 add_immBlock_pred (res->start_block, projX);
270 mature_immBlock (res->start_block); /* mature the start block */
272 add_immBlock_pred (new_immBlock (), projX);
273 mature_immBlock (get_cur_block()); /* mature the 'body' block for expressions */
275 /* Set the visited flag high enough that the blocks will never be visited. */
276 set_irn_visited(get_cur_block(), -1);
277 set_Block_block_visited(get_cur_block(), -1);
278 set_Block_block_visited(res->start_block, -1);
279 set_irn_visited(res->start_block, -1);
280 set_irn_visited(res->bad, -1);
281 set_irn_visited(res->no_mem, -1);
283 res->phase_state = phase_high;
287 /* Defined in iropt.c */
288 void del_identities (pset *value_table);
290 /* Frees the passed irgraph.
291 Deallocates all nodes in this graph and the ir_graph structure.
292 Sets the field irgraph in the corresponding entity to NULL.
293 Does not remove the irgraph from the list in irprog (requires
294 inefficient search, call remove_irp_irg by hand).
295 Does not free types, entities or modes that are used only by this
296 graph, nor the entity standing for this graph. */
297 void free_ir_graph (ir_graph *irg) {
299 stat_free_graph(irg);
300 if (irg->outs_state != outs_none) free_outs(irg);
301 if (irg->frame_type) free_type(irg->frame_type);
302 if (irg->value_table) del_identities(irg->value_table);
304 peculiarity pec = get_entity_peculiarity (irg->ent);
305 set_entity_peculiarity (irg->ent, peculiarity_description);
306 set_entity_irg(irg->ent, NULL); /* not set in const code irg */
307 set_entity_peculiarity (irg->ent, pec);
311 obstack_free(irg->obst,NULL);
313 #if USE_EXPLICIT_PHI_IN_STACK
314 free_Phi_in_stack(irg->Phi_in_stack);
/* access routines for all ir_graph attributes:
   {attr type} get_irg_{attribute name} (ir_graph *irg);
   void        set_irg_{attr name}      (ir_graph *irg, {attr type} {attr}); */
/** Returns non-zero if the passed thing is an ir_graph. */
int
(is_ir_graph)(const void *thing) {
  return __is_ir_graph(thing);
}
330 /* Outputs a unique number for this node */
333 get_irg_graph_nr(ir_graph *irg) {
336 return irg->graph_nr;
343 (get_irg_start_block)(const ir_graph *irg) {
344 return __get_irg_start_block(irg);
348 (set_irg_start_block)(ir_graph *irg, ir_node *node) {
349 __set_irg_start_block(irg, node);
353 (get_irg_start)(const ir_graph *irg) {
354 return __get_irg_start(irg);
358 (set_irg_start)(ir_graph *irg, ir_node *node) {
359 __set_irg_start(irg, node);
363 (get_irg_end_block)(const ir_graph *irg) {
364 return __get_irg_end_block(irg);
368 (set_irg_end_block)(ir_graph *irg, ir_node *node) {
369 __set_irg_end_block(irg, node);
373 (get_irg_end)(const ir_graph *irg) {
374 return __get_irg_end(irg);
378 (set_irg_end)(ir_graph *irg, ir_node *node) {
379 __set_irg_end(irg, node);
383 (get_irg_end_reg)(const ir_graph *irg) {
384 return __get_irg_end_reg(irg);
387 void set_irg_end_reg (ir_graph *irg, ir_node *node) {
388 assert(get_irn_op(node) == op_EndReg || get_irn_op(node) == op_End);
393 (get_irg_end_except)(const ir_graph *irg) {
394 return __get_irg_end_except(irg);
397 void set_irg_end_except (ir_graph *irg, ir_node *node) {
398 assert(get_irn_op(node) == op_EndExcept || get_irn_op(node) == op_End);
399 irg->end_except = node;
403 (get_irg_cstore)(const ir_graph *irg) {
404 return __get_irg_cstore(irg);
408 (set_irg_cstore)(ir_graph *irg, ir_node *node) {
409 __set_irg_cstore(irg, node);
413 (get_irg_frame)(const ir_graph *irg) {
414 return __get_irg_frame(irg);
418 (set_irg_frame)(ir_graph *irg, ir_node *node) {
419 __set_irg_frame(irg, node);
423 (get_irg_globals)(const ir_graph *irg) {
424 return __get_irg_globals(irg);
428 (set_irg_globals)(ir_graph *irg, ir_node *node) {
429 __set_irg_globals(irg, node);
433 (get_irg_initial_mem)(const ir_graph *irg) {
434 return __get_irg_initial_mem(irg);
438 (set_irg_initial_mem)(ir_graph *irg, ir_node *node) {
439 __set_irg_initial_mem(irg, node);
443 (get_irg_args)(const ir_graph *irg) {
444 return __get_irg_args(irg);
448 (set_irg_args)(ir_graph *irg, ir_node *node) {
449 __set_irg_args(irg, node);
453 (get_irg_bad)(const ir_graph *irg) {
454 return __get_irg_bad(irg);
458 (set_irg_bad)(ir_graph *irg, ir_node *node) {
459 __set_irg_bad(irg, node);
463 (get_irg_no_mem)(const ir_graph *irg) {
464 return __get_irg_no_mem(irg);
468 (set_irg_no_mem)(ir_graph *irg, ir_node *node) {
469 __set_irg_no_mem(irg, node);
473 (get_irg_current_block)(const ir_graph *irg) {
474 return __get_irg_current_block(irg);
478 (set_irg_current_block)(ir_graph *irg, ir_node *node) {
479 __set_irg_current_block(irg, node);
483 (get_irg_entity)(const ir_graph *irg) {
484 return __get_irg_entity(irg);
488 (set_irg_entity)(ir_graph *irg, entity *ent) {
489 __set_irg_entity(irg, ent);
493 (get_irg_frame_type)(const ir_graph *irg) {
494 return __get_irg_frame_type(irg);
498 (set_irg_frame_type)(ir_graph *irg, type *ftp) {
499 __set_irg_frame_type(irg, ftp);
503 /* To test for a frame type */
505 is_frame_type(const type *ftp) {
507 if (is_class_type(ftp)) {
508 for (i = 0; i < get_irp_n_irgs(); i++) {
509 const type *frame_tp = get_irg_frame_type(get_irp_irg(i));
510 if (ftp == frame_tp) return true;
517 get_irg_n_locs (ir_graph *irg)
519 if (get_opt_precise_exc_context())
520 return irg->n_loc - 1 - 1;
522 return irg->n_loc - 1;
526 set_irg_n_loc (ir_graph *irg, int n_loc)
528 if (get_opt_precise_exc_context())
529 irg->n_loc = n_loc + 1 + 1;
531 irg->n_loc = n_loc + 1;
536 /* Returns the obstack associated with the graph. */
538 (get_irg_obstack)(const ir_graph *irg) {
539 return __get_irg_obstack(irg);
543 * Returns true if the node n is allocated on the storage of graph irg.
545 * Implementation is GLIBC specific as is uses the internal _obstack_chunk implementation.
547 int node_is_in_irgs_storage(ir_graph *irg, ir_node *n)
549 struct _obstack_chunk *p;
552 * checks wheater the ir_node pointer i on the obstack.
553 * A more sophisticated check would test the "whole" ir_node
555 for (p = irg->obst->chunk; p; p = p->prev) {
556 if (((char *)p->contents <= (char *)n) && ((char *)n < (char *)p->limit))
564 (get_irg_phase_state)(const ir_graph *irg) {
565 return __get_irg_phase_state(irg);
569 (set_irg_phase_low)(ir_graph *irg) {
570 __set_irg_phase_low(irg);
574 (get_irg_pinned)(const ir_graph *irg) {
575 return __get_irg_pinned(irg);
579 (get_irg_outs_state)(const ir_graph *irg) {
580 return __get_irg_outs_state(irg);
584 (set_irg_outs_inconsistent)(ir_graph *irg) {
585 __set_irg_outs_inconsistent(irg);
589 (get_irg_dom_state)(const ir_graph *irg) {
590 return __get_irg_dom_state(irg);
594 (set_irg_dom_inconsistent)(ir_graph *irg) {
595 __set_irg_dom_inconsistent(irg);
599 (get_irg_loopinfo_state)(const ir_graph *irg) {
600 return __get_irg_loopinfo_state(irg);
604 (set_irg_loopinfo_state)(ir_graph *irg, irg_loopinfo_state s) {
605 __set_irg_loopinfo_state(irg, s);
609 set_irg_loopinfo_inconsistent(ir_graph *irg) {
610 if (irg->loopinfo_state == loopinfo_ip_consistent)
611 irg->loopinfo_state = loopinfo_ip_inconsistent;
613 else if (irg->loopinfo_state == loopinfo_consistent)
614 irg->loopinfo_state = loopinfo_inconsistent;
616 else if (irg->loopinfo_state == loopinfo_cf_ip_consistent)
617 irg->loopinfo_state = loopinfo_cf_ip_inconsistent;
619 else if (irg->loopinfo_state == loopinfo_cf_consistent)
620 irg->loopinfo_state = loopinfo_cf_inconsistent;
624 (set_irg_pinned)(ir_graph *irg, op_pin_state p) {
625 __set_irg_pinned(irg, p);
628 irg_callee_info_state
629 (get_irg_callee_info_state)(const ir_graph *irg) {
630 return __get_irg_callee_info_state(irg);
634 (set_irg_callee_info_state)(ir_graph *irg, irg_callee_info_state s) {
635 __set_irg_callee_info_state(irg, s);
639 (get_irg_inline_property)(const ir_graph *irg) {
640 return __get_irg_inline_property(irg);
644 (set_irg_inline_property)(ir_graph *irg, irg_inline_property s) {
645 __set_irg_inline_property(irg, s);
649 (set_irg_link)(ir_graph *irg, void *thing) {
650 __set_irg_link(irg, thing);
654 (get_irg_link)(const ir_graph *irg) {
655 return __get_irg_link(irg);
658 /* maximum visited flag content of all ir_graph visited fields. */
659 static int max_irg_visited = 0;
662 (get_irg_visited)(const ir_graph *irg) {
663 return __get_irg_visited(irg);
667 set_irg_visited (ir_graph *irg, unsigned long visited)
669 irg->visited = visited;
670 if (irg->visited > max_irg_visited) {
671 max_irg_visited = irg->visited;
676 inc_irg_visited (ir_graph *irg)
678 if (++irg->visited > max_irg_visited) {
679 max_irg_visited = irg->visited;
684 get_max_irg_visited(void)
688 for(i = 0; i < get_irp_n_irgs(); i++)
689 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
691 return max_irg_visited;
694 void set_max_irg_visited(int val) {
695 max_irg_visited = val;
699 inc_max_irg_visited(void)
703 for(i = 0; i < get_irp_n_irgs(); i++)
704 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
707 return max_irg_visited;
711 (get_irg_block_visited)(const ir_graph *irg) {
712 return __get_irg_block_visited(irg);
716 (set_irg_block_visited)(ir_graph *irg, unsigned long visited) {
717 __set_irg_block_visited(irg, visited);
721 (inc_irg_block_visited)(ir_graph *irg) {
722 __inc_irg_block_visited(irg);
727 * walker Start->End: places Proj nodes into the same block
728 * as it's predecessors
733 static void normalize_proj_walker(ir_node *n, void *env)
736 ir_node *pred = get_Proj_pred(n);
737 ir_node *block = get_nodes_block(pred);
739 set_nodes_block(n, block);
743 /* put the proj's into the same block as its predecessors */
744 void normalize_proj_nodes(ir_graph *irg)
746 irg_walk_graph(irg, NULL, normalize_proj_walker, NULL);
747 set_irg_outs_inconsistent(irg);
750 size_t register_additional_graph_data(size_t size)
752 assert(!forbid_new_data && "Too late to register additional node data");
757 return additional_graph_data_size += size;