3 * File name: ir/ir/irgraph.c
4 * Purpose: Entry point to the representation of procedure code.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include <string.h>

# include "irgraph_t.h"
# include "irprog_t.h"
# include "irnode_t.h"
# include "irflag_t.h"
/**
 * Indicates, whether additional data can be registered to graphs.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new graph.
 */
static size_t additional_graph_data_size = 0;
49 ir_graph *current_ir_graph;
50 ir_graph *get_current_ir_graph(void) {
51 return current_ir_graph;
53 void set_current_ir_graph(ir_graph *graph) {
54 current_ir_graph = graph;
58 int __interprocedural_view = false;
60 int (get_interprocedural_view)(void) {
61 return _get_interprocedural_view();
64 void (set_interprocedural_view)(int state) {
65 __interprocedural_view = state;
67 /* set function vectors for faster access */
69 __get_irn_arity = __get_irn_inter_arity;
70 __get_irn_n = __get_irn_inter_n;
73 __get_irn_arity = __get_irn_intra_arity;
74 __get_irn_n = __get_irn_intra_n;
78 static ident* frame_type_suffix = NULL;
79 void init_irgraph(void) {
80 frame_type_suffix = new_id_from_str(FRAME_TP_SUFFIX);
85 * Allocate a new ir graph.
86 * This function respects the registered graph data. The only reason for
87 * this function is, that there are two locations, where graphs are
88 * allocated (new_r_ir_graph, new_const_code_irg).
89 * @return Memory for a new graph.
91 ir_graph *alloc_graph(void)
93 size_t size = sizeof(ir_graph) + additional_graph_data_size;
94 char *ptr = xmalloc(size);
97 return (ir_graph *) (ptr + additional_graph_data_size);
#if USE_EXPLICIT_PHI_IN_STACK
/* really defined in ircons.c */
typedef struct Phi_in_stack Phi_in_stack;
/* (void) makes these true prototypes; an empty parameter list would not */
Phi_in_stack *new_Phi_in_stack(void);
void free_Phi_in_stack(Phi_in_stack *s);
#endif
107 /* Allocates a list of nodes:
108 - The start block containing a start node and Proj nodes for it's four
109 results (X, M, P, Tuple).
110 - The end block containing an end node. This block is not matured after
111 new_ir_graph as predecessors need to be added to it.
112 - The current block, which is empty and also not matured.
113 Further it allocates several datastructures needed for graph construction
117 new_r_ir_graph (entity *ent, int n_loc)
120 ir_node *first_block;
124 res->kind = k_ir_graph;
126 /* inform statistics here, as blocks will be already build on this graph */
127 hook_new_graph(res, ent);
/* all construction primitives below (new_immBlock, new_End, new_Proj, ...)
   operate implicitly on current_ir_graph, so switch to the new graph first */
129 current_ir_graph = res;
131 /*-- initialized for each graph. --*/
132 if (get_opt_precise_exc_context()) {
133 res->n_loc = n_loc + 1 + 1; /* number of local variables that are never
134 dereferenced in this graph plus one for
135 the store plus one for links to fragile
136 operations. n_loc is not the number of
137 parameters to the procedure! */
140 res->n_loc = n_loc + 1; /* number of local variables that are never
141 dereferenced in this graph plus one for
142 the store. This is not the number of parameters
146 res->visited = 0; /* visited flag, for the ir walker */
147 res->block_visited = 0; /* visited flag, for the 'block'-walker */
149 #if USE_EXPLICIT_PHI_IN_STACK
150 res->Phi_in_stack = new_Phi_in_stack(); /* A stack needed for automatic Phi
/* NOTE(review): kind is assigned k_ir_graph a second time here; the first
   assignment above looks redundant -- kept as is */
153 res->kind = k_ir_graph;
154 res->obst = xmalloc (sizeof(*res->obst));
155 obstack_init (res->obst);
156 res->value_table = new_identities (); /* value table for global value
157 numbering for optimizing use in
/* initial analysis-state flags: nothing computed yet, graph being built */
161 res->phase_state = phase_building;
162 res->op_pin_state_pinned = op_pin_state_pinned;
163 res->outs_state = outs_none;
164 res->dom_state = dom_none;
165 res->typeinfo_state = irg_typeinfo_none;
166 res->loopinfo_state = loopinfo_none;
168 /*-- Type information for the procedure of the graph --*/
170 set_entity_irg(ent, res);
172 /*-- a class type so that it can contain "inner" methods as in Pascal. --*/
173 res->frame_type = new_type_class(mangle(get_entity_ident(ent), frame_type_suffix));
175 /* Remove type from type list. Must be treated differently than other types. */
176 remove_irp_type_from_list(res->frame_type);
178 /*-- Nodes needed in every graph --*/
179 res->end_block = new_immBlock();
180 res->end = new_End();
/* until EndReg/EndExcept nodes exist, both aliases point to the End node */
181 res->end_reg = res->end;
182 res->end_except = res->end;
184 res->start_block = new_immBlock();
185 res->start = new_Start();
186 res->bad = new_ir_node(NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
187 res->no_mem = new_ir_node(NULL, res, res->start_block, op_NoMem, mode_M, 0, NULL);
189 /* Proj results of start node */
190 projX = new_Proj (res->start, mode_X, pn_Start_X_initial_exec);
191 res->frame = new_Proj (res->start, mode_P_mach, pn_Start_P_frame_base);
192 res->globals = new_Proj (res->start, mode_P_mach, pn_Start_P_globals);
193 res->initial_mem = new_Proj (res->start, mode_M, pn_Start_M);
194 res->args = new_Proj (res->start, mode_T, pn_Start_T_args);
196 res->graph_nr = get_irp_new_node_nr();
/* the per-argument Proj array is built lazily on demand */
198 res->proj_args = NULL;
/* the initial memory becomes the current store of the construction state */
200 set_store(res->initial_mem);
202 add_immBlock_pred(res->start_block, projX);
204 * The code generation needs it. leave it in now.
205 * Use of this edge is matter of discussion, unresolved. Also possible:
206 * add_immBlock_pred(res->start_block, res->start_block), but invalid typed.
208 mature_immBlock (res->current_block);
210 /*-- Make a block to start with --*/
211 first_block = new_immBlock();
212 add_immBlock_pred (first_block, projX);
/* -1 marks the execution frequency as not yet computed */
214 res->method_execution_frequency = -1;
221 new_ir_graph (entity *ent, int n_loc)
223 ir_graph *res = new_r_ir_graph (ent, n_loc);
224 add_irp_irg(res); /* remember this graph global. */
228 /* Make a rudimentary ir graph for the constant code.
229 Must look like a correct irg, spare everything else. */
230 ir_graph *new_const_code_irg(void) {
236 /* inform statistics here, as blocks will be already build on this graph */
237 hook_new_graph(res, NULL);
/* construction primitives operate on current_ir_graph implicitly */
239 current_ir_graph = res;
240 res->n_loc = 1; /* Only the memory. */
241 res->visited = 0; /* visited flag, for the ir walker */
242 res->block_visited=0; /* visited flag, for the 'block'-walker */
243 #if USE_EXPLICIT_PHI_IN_STACK
/* no Phi construction ever happens in the const code irg */
244 res->Phi_in_stack = NULL;
246 res->kind = k_ir_graph;
247 res->obst = xmalloc (sizeof(*res->obst));
248 obstack_init (res->obst);
249 res->phase_state = phase_building;
250 res->op_pin_state_pinned = op_pin_state_pinned;
251 res->value_table = new_identities (); /* value table for global value
252 numbering for optimizing use in
/* constant code belongs to no procedure, hence no frame type */
255 res->frame_type = NULL;
257 /* -- The end block -- */
258 res->end_block = new_immBlock ();
259 res->end = new_End ();
260 res->end_reg = res->end;
261 res->end_except = res->end;
262 mature_immBlock(get_cur_block()); /* mature the end block */
264 /* -- The start block -- */
265 res->start_block = new_immBlock ();
266 res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
267 res->no_mem = new_ir_node (NULL, res, res->start_block, op_NoMem, mode_M, 0, NULL);
268 res->start = new_Start ();
269 /* Proj results of start node */
270 res->initial_mem = new_Proj (res->start, mode_M, pn_Start_M);
271 projX = new_Proj (res->start, mode_X, pn_Start_X_initial_exec);
272 add_immBlock_pred (res->start_block, projX);
273 mature_immBlock (res->start_block); /* mature the start block */
275 add_immBlock_pred (new_immBlock (), projX);
276 mature_immBlock (get_cur_block()); /* mature the 'body' block for expressions */
278 /* Set the visited flag high enough that the blocks will never be visited. */
/* -1 wraps around to the maximum unsigned long value */
279 set_irn_visited(get_cur_block(), -1);
280 set_Block_block_visited(get_cur_block(), -1);
281 set_Block_block_visited(res->start_block, -1);
282 set_irn_visited(res->start_block, -1);
283 set_irn_visited(res->bad, -1);
284 set_irn_visited(res->no_mem, -1);
/* pretend the graph is fully built so no pass tries to finish it */
286 res->phase_state = phase_high;
290 /* Defined in iropt.c */
291 void del_identities (pset *value_table);
293 /* Frees the passed irgraph.
294 Deallocates all nodes in this graph and the ir_graph structure.
295 Sets the field irgraph in the corresponding entity to NULL.
296 Does not remove the irgraph from the list in irprog (requires
297 inefficient search, call remove_irp_irg by hand).
298 Does not free types, entities or modes that are used only by this
299 graph, nor the entity standing for this graph. */
300 void free_ir_graph (ir_graph *irg) {
/* notify statistics/debug hooks before tearing anything down */
302 hook_free_graph(irg);
/* release analysis data and auxiliary tables first */
303 if (irg->outs_state != outs_none) free_outs(irg);
304 if (irg->frame_type) free_type(irg->frame_type);
305 if (irg->value_table) del_identities(irg->value_table);
/* temporarily switch the entity to 'description' so clearing its irg is
   legal, then restore the original peculiarity */
307 peculiarity pec = get_entity_peculiarity (irg->ent);
308 set_entity_peculiarity (irg->ent, peculiarity_description);
309 set_entity_irg(irg->ent, NULL); /* not set in const code irg */
310 set_entity_peculiarity (irg->ent, pec);
/* releasing the obstack frees all nodes of the graph in one stroke */
314 obstack_free(irg->obst,NULL);
316 #if USE_EXPLICIT_PHI_IN_STACK
317 free_Phi_in_stack(irg->Phi_in_stack);
323 /* access routines for all ir_graph attributes:
325 {attr type} get_irg_{attribute name} (ir_graph *irg);
326 void set_irg_{attr name} (ir_graph *irg, {attr type} {attr}); */
/* the parenthesized function names suppress expansion of the same-named
   macros, producing the out-of-line versions of the inline accessors */
329 (is_ir_graph)(const void *thing) {
330 return _is_ir_graph(thing);
333 /* Outputs a unique number for this node */
336 get_irg_graph_nr(ir_graph *irg) {
/* NOTE(review): graph_nr looks like a debug-only counter (presumably
   guarded by DEBUG_libfirm) -- confirm against the full source */
339 return irg->graph_nr;
/* Out-of-line definitions of the start/end node accessors; each body
   delegates to the corresponding inline _get/_set helper. */
346 (get_irg_start_block)(const ir_graph *irg) {
347 return _get_irg_start_block(irg);
351 (set_irg_start_block)(ir_graph *irg, ir_node *node) {
352 _set_irg_start_block(irg, node);
356 (get_irg_start)(const ir_graph *irg) {
357 return _get_irg_start(irg);
361 (set_irg_start)(ir_graph *irg, ir_node *node) {
362 _set_irg_start(irg, node);
366 (get_irg_end_block)(const ir_graph *irg) {
367 return _get_irg_end_block(irg);
371 (set_irg_end_block)(ir_graph *irg, ir_node *node) {
372 _set_irg_end_block(irg, node);
376 (get_irg_end)(const ir_graph *irg) {
377 return _get_irg_end(irg);
381 (set_irg_end)(ir_graph *irg, ir_node *node) {
382 _set_irg_end(irg, node);
386 (get_irg_end_reg)(const ir_graph *irg) {
387 return _get_irg_end_reg(irg);
390 void set_irg_end_reg (ir_graph *irg, ir_node *node) {
391 assert(get_irn_op(node) == op_EndReg || get_irn_op(node) == op_End);
396 (get_irg_end_except)(const ir_graph *irg) {
397 return _get_irg_end_except(irg);
400 void set_irg_end_except (ir_graph *irg, ir_node *node) {
401 assert(get_irn_op(node) == op_EndExcept || get_irn_op(node) == op_End);
402 irg->end_except = node;
/* Further delegating accessors: constant store, frame, globals, initial
   memory, arguments, Bad/NoMem anchors, current block, entity and frame
   type. All bodies forward to the inline _get/_set helpers. */
406 (get_irg_cstore)(const ir_graph *irg) {
407 return _get_irg_cstore(irg);
411 (set_irg_cstore)(ir_graph *irg, ir_node *node) {
412 _set_irg_cstore(irg, node);
416 (get_irg_frame)(const ir_graph *irg) {
417 return _get_irg_frame(irg);
421 (set_irg_frame)(ir_graph *irg, ir_node *node) {
422 _set_irg_frame(irg, node);
426 (get_irg_globals)(const ir_graph *irg) {
427 return _get_irg_globals(irg);
431 (set_irg_globals)(ir_graph *irg, ir_node *node) {
432 _set_irg_globals(irg, node);
436 (get_irg_initial_mem)(const ir_graph *irg) {
437 return _get_irg_initial_mem(irg);
441 (set_irg_initial_mem)(ir_graph *irg, ir_node *node) {
442 _set_irg_initial_mem(irg, node);
446 (get_irg_args)(const ir_graph *irg) {
447 return _get_irg_args(irg);
451 (set_irg_args)(ir_graph *irg, ir_node *node) {
452 _set_irg_args(irg, node);
456 (get_irg_proj_args) (const ir_graph *irg) {
457 return _get_irg_proj_args (irg);
461 (set_irg_proj_args) (ir_graph *irg, ir_node **nodes) {
462 _set_irg_proj_args (irg, nodes);
466 (get_irg_bad)(const ir_graph *irg) {
467 return _get_irg_bad(irg);
471 (set_irg_bad)(ir_graph *irg, ir_node *node) {
472 _set_irg_bad(irg, node);
476 (get_irg_no_mem)(const ir_graph *irg) {
477 return _get_irg_no_mem(irg);
481 (set_irg_no_mem)(ir_graph *irg, ir_node *node) {
482 _set_irg_no_mem(irg, node);
486 (get_irg_current_block)(const ir_graph *irg) {
487 return _get_irg_current_block(irg);
491 (set_irg_current_block)(ir_graph *irg, ir_node *node) {
492 _set_irg_current_block(irg, node);
496 (get_irg_entity)(const ir_graph *irg) {
497 return _get_irg_entity(irg);
501 (set_irg_entity)(ir_graph *irg, entity *ent) {
502 _set_irg_entity(irg, ent);
506 (get_irg_frame_type)(const ir_graph *irg) {
507 return _get_irg_frame_type(irg);
511 (set_irg_frame_type)(ir_graph *irg, type *ftp) {
512 _set_irg_frame_type(irg, ftp);
516 /* To test for a frame type */
518 is_frame_type(const type *ftp) {
520 if (is_Class_type(ftp)) {
521 for (i = 0; i < get_irp_n_irgs(); i++) {
522 const type *frame_tp = get_irg_frame_type(get_irp_irg(i));
523 if (ftp == frame_tp) return true;
530 get_irg_n_locs (ir_graph *irg)
532 if (get_opt_precise_exc_context())
533 return irg->n_loc - 1 - 1;
535 return irg->n_loc - 1;
539 set_irg_n_loc (ir_graph *irg, int n_loc)
541 if (get_opt_precise_exc_context())
542 irg->n_loc = n_loc + 1 + 1;
544 irg->n_loc = n_loc + 1;
549 /* Returns the obstack associated with the graph. */
551 (get_irg_obstack)(const ir_graph *irg) {
552 return _get_irg_obstack(irg);
556 * Returns true if the node n is allocated on the storage of graph irg.
558 * Implementation is GLIBC specific as is uses the internal _obstack_chunk implementation.
560 int node_is_in_irgs_storage(ir_graph *irg, ir_node *n)
562 struct _obstack_chunk *p;
565 * checks wheater the ir_node pointer i on the obstack.
566 * A more sophisticated check would test the "whole" ir_node
568 for (p = irg->obst->chunk; p; p = p->prev) {
569 if (((char *)p->contents <= (char *)n) && ((char *)n < (char *)p->limit))
/* State accessors: phase, pinned-ness, out edges, dominance and loop
   information. All bodies forward to the inline _get/_set helpers. */
577 (get_irg_phase_state)(const ir_graph *irg) {
578 return _get_irg_phase_state(irg);
582 (set_irg_phase_low)(ir_graph *irg) {
583 _set_irg_phase_low(irg);
587 (get_irg_pinned)(const ir_graph *irg) {
588 return _get_irg_pinned(irg);
592 (get_irg_outs_state)(const ir_graph *irg) {
593 return _get_irg_outs_state(irg);
597 (set_irg_outs_inconsistent)(ir_graph *irg) {
598 _set_irg_outs_inconsistent(irg);
602 (get_irg_dom_state)(const ir_graph *irg) {
603 return _get_irg_dom_state(irg);
607 (set_irg_dom_inconsistent)(ir_graph *irg) {
608 _set_irg_dom_inconsistent(irg);
612 (get_irg_loopinfo_state)(const ir_graph *irg) {
613 return _get_irg_loopinfo_state(irg);
617 (set_irg_loopinfo_state)(ir_graph *irg, irg_loopinfo_state s) {
618 _set_irg_loopinfo_state(irg, s);
622 set_irg_loopinfo_inconsistent(ir_graph *irg) {
623 if (irg->loopinfo_state == loopinfo_ip_consistent)
624 irg->loopinfo_state = loopinfo_ip_inconsistent;
626 else if (irg->loopinfo_state == loopinfo_consistent)
627 irg->loopinfo_state = loopinfo_inconsistent;
629 else if (irg->loopinfo_state == loopinfo_cf_ip_consistent)
630 irg->loopinfo_state = loopinfo_cf_ip_inconsistent;
632 else if (irg->loopinfo_state == loopinfo_cf_consistent)
633 irg->loopinfo_state = loopinfo_cf_inconsistent;
/* More delegating accessors: pinned state, callee-info state, inline
   property and the generic per-graph link pointer. */
637 (set_irg_pinned)(ir_graph *irg, op_pin_state p) {
638 _set_irg_pinned(irg, p);
641 irg_callee_info_state
642 (get_irg_callee_info_state)(const ir_graph *irg) {
643 return _get_irg_callee_info_state(irg);
647 (set_irg_callee_info_state)(ir_graph *irg, irg_callee_info_state s) {
648 _set_irg_callee_info_state(irg, s);
652 (get_irg_inline_property)(const ir_graph *irg) {
653 return _get_irg_inline_property(irg);
657 (set_irg_inline_property)(ir_graph *irg, irg_inline_property s) {
658 _set_irg_inline_property(irg, s);
662 (set_irg_link)(ir_graph *irg, void *thing) {
663 _set_irg_link(irg, thing);
667 (get_irg_link)(const ir_graph *irg) {
668 return _get_irg_link(irg);
671 /** maximum visited flag content of all ir_graph visited fields. */
/* kept up to date by set_irg_visited / inc_irg_visited below */
672 static unsigned long max_irg_visited = 0;
675 (get_irg_visited)(const ir_graph *irg) {
676 return _get_irg_visited(irg);
680 set_irg_visited (ir_graph *irg, unsigned long visited)
682 irg->visited = visited;
683 if (irg->visited > max_irg_visited) {
684 max_irg_visited = irg->visited;
689 inc_irg_visited (ir_graph *irg)
691 if (++irg->visited > max_irg_visited) {
692 max_irg_visited = irg->visited;
697 get_max_irg_visited(void)
701 for(i = 0; i < get_irp_n_irgs(); i++)
702 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
704 return max_irg_visited;
707 void set_max_irg_visited(int val) {
708 max_irg_visited = val;
712 inc_max_irg_visited(void)
716 for(i = 0; i < get_irp_n_irgs(); i++)
717 assert(max_irg_visited >= get_irg_visited(get_irp_irg(i)));
720 return max_irg_visited;
724 (get_irg_block_visited)(const ir_graph *irg) {
725 return _get_irg_block_visited(irg);
729 (set_irg_block_visited)(ir_graph *irg, unsigned long visited) {
730 _set_irg_block_visited(irg, visited);
734 (inc_irg_block_visited)(ir_graph *irg) {
735 _inc_irg_block_visited(irg);
740 * walker Start->End: places Proj nodes into the same block
741 * as it's predecessors
746 static void normalize_proj_walker(ir_node *n, void *env)
749 ir_node *pred = get_Proj_pred(n);
750 ir_node *block = get_nodes_block(pred);
752 set_nodes_block(n, block);
756 /* put the proj's into the same block as its predecessors */
757 void normalize_proj_nodes(ir_graph *irg)
759 irg_walk_graph(irg, NULL, normalize_proj_walker, NULL);
760 set_irg_outs_inconsistent(irg);
763 size_t register_additional_graph_data(size_t size)
765 assert(!forbid_new_data && "Too late to register additional node data");
770 return additional_graph_data_size += size;