SOURCES = Makefile.in array.c array.h cookies.h debug.c debug.h host.h obst.h \
- pdeq.c pdeq.h pset.h set.c set.h xmalloc.c
+ pdeq.c pdeq.h pset.h set.c set.h xmalloc.c pmap.h pmap.c eset.h eset.c
include $(topdir)/MakeRules
topdir = ../..
subdir := ir/ana
-INSTALL_HEADERS = irouts.h irdom.h
+INSTALL_HEADERS = irouts.h irdom.h cgana.h
SOURCES = $(INSTALL_HEADERS)
SOURCES += Makefile.in \
- irouts.c irdom_t.h irdom.c
+ irouts.c irdom_t.h irdom.c cgana.c
include $(topdir)/MakeRules
--- /dev/null
+/* -------------------------------------------------------------------
+ * $Id$
+ * -------------------------------------------------------------------
+ * Intraprozedurale Analyse zur Abschätzung der Aufrulrelation. Es
+ * wird eine Menge von freien Methoden und anschließend die an den
+ * Call-Operationen aufrufbaren Methoden bestimmt.
+ *
+ * Erstellt: Hubert Schmid, 09.06.2002
+ * ---------------------------------------------------------------- */
+
+
+#include "cgana.h"
+
+
+#include "eset.h"
+#include "pmap.h"
+#include "array.h"
+#include "irprog.h"
+#include "irgwalk.h"
+#include "ircons.h"
+#include "irgmod.h"
+
+
+/* Eindeutige Adresse zur Markierung von besuchten Knoten und zur
+ * Darstellung der unbekannten Methode. */
+static void * MARK = &MARK;
+
+
+
+/* --- sel methods ---------------------------------------------------------- */
+
+
+static eset * entities = NULL;
+
+
+/* Determine the unique method that implements "method" for the given
+ * (dynamic) class type.  Checks the class itself, then the entities
+ * overwriting "method", then the supertypes recursively.  Aborts if
+ * no implementation can be found. */
+static entity * get_implementation(type * class, entity * method) {
+ int i;
+ if (get_entity_peculiarity(method) != description && get_entity_owner(method) == class) {
+ return method;
+ }
+ for (i = get_entity_n_overwrittenby(method) - 1; i >= 0; --i) {
+ entity * e = get_entity_overwrittenby(method, i);
+ if (get_entity_peculiarity(e) != description && get_entity_owner(e) == class) {
+ return e;
+ }
+ }
+ for (i = get_class_n_supertype(class) - 1; i >= 0; --i) {
+ entity * e = get_implementation(get_class_supertype(class, i), method);
+ if (e) {
+ return e;
+ }
+ }
+ assert(0 && "implementation not found");
+ /* Not reached; avoids falling off a non-void function (undefined
+  * behavior) when NDEBUG disables the assert. */
+ return NULL;
+}
+
+
+/* Determine all methods that overwrite (and implement) the given
+ * method.  Each method occurs only once in the returned array.  The
+ * value 'NULL' stands for unknown (external) methods.  The returned
+ * array must be freed by the caller (see "DEL_ARR_F").  If there are
+ * no methods at all that overwrite "method", NULL is returned. */
+static entity ** get_impl_methods(entity * method) {
+ eset * set = eset_create();
+ int size = 0;
+ int i;
+ entity ** arr;
+ bool open = false;
+ /* "method" itself counts if it exists; an externally allocated
+  * entity has no graph and makes the result "open" (unknown). */
+ if (get_entity_peculiarity(method) == existent) {
+ if (get_entity_visibility(method) == external_allocated) {
+ assert(get_entity_irg(method) == NULL);
+ open = true;
+ } else {
+ assert(get_entity_irg(method) != NULL);
+ eset_insert(set, method);
+ ++size;
+ }
+ }
+ for (i = get_entity_n_overwrittenby(method) - 1; i >= 0; --i) {
+ entity * ent = get_entity_overwrittenby(method, i);
+ if (get_entity_peculiarity(ent) == existent) {
+ if (get_entity_visibility(ent) == external_allocated) {
+ assert(get_entity_irg(ent) == NULL);
+ open = true;
+ } else {
+ assert(get_entity_irg(ent) != NULL);
+ if (!eset_contains(set, ent)) {
+ eset_insert(set, ent);
+ ++size;
+ }
+ }
+ }
+ }
+ if (size == 0 && !open) {
+ /* no implemented overwriting method at all */
+ arr = NULL;
+ } else if (open) {
+ entity * ent;
+ /* slot 0 holds NULL to represent the unknown external method */
+ arr = NEW_ARR_F(entity *, size + 1);
+ arr[0] = NULL;
+ for (ent = eset_first(set); size > 0; ent = eset_next(set), --size) arr[size] = ent;
+ } else {
+ entity * ent;
+ arr = NEW_ARR_F(entity *, size);
+ for (size -= 1, ent = eset_first(set); size >= 0; ent = eset_next(set), --size) arr[size] = ent;
+ }
+ eset_destroy(set);
+ return arr;
+}
+
+
+/* Walker: replaces SymConst nodes that point to internal methods by
+ * Const nodes, and resolves/annotates Sel nodes on method types. */
+static void sel_methods_walker(ir_node * node, pmap * ldname_map) {
+ if (get_irn_op(node) == op_SymConst) {
+ /* Replace the SymConst operation by a Const operation where
+ * possible. */
+ if (get_SymConst_kind(node) == linkage_ptr_info) {
+ pmap_entry * entry = pmap_find(ldname_map, (void *) get_SymConst_ptrinfo(node));
+ if (entry != NULL) {
+ entity * ent = entry->value;
+ if (get_entity_visibility(ent) != external_allocated) {
+ assert(get_entity_irg(ent));
+ set_irg_current_block(current_ir_graph, get_nodes_Block(node));
+ exchange(node, new_Const(mode_p, tarval_p_from_entity(ent)));
+ }
+ }
+ }
+ } else if (get_irn_op(node) == op_Sel && is_method_type(get_entity_type(get_Sel_entity(node)))) {
+ entity * ent = get_Sel_entity(node);
+ if (get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc) {
+ /* Pointer comes from an Alloc: the dynamic type is known, so
+ * the implementation can be resolved statically. */
+ ent = get_implementation(get_Alloc_type(skip_Proj(get_Sel_ptr(node))), ent);
+ if (get_entity_visibility(ent) == external_allocated) {
+ exchange(node, new_SymConst((type_or_id_p) get_entity_ld_ident(ent), linkage_ptr_info));
+ } else {
+ exchange(node, new_Const(mode_p, tarval_p_from_entity(ent)));
+ }
+ } else {
+ if (!eset_contains(entities, ent)) {
+ /* Entity not handled yet.  Find all (internally or
+ * externally) implemented methods that overwrite this
+ * entity and store the set at entity.link. */
+ set_entity_link(ent, get_impl_methods(ent));
+ eset_insert(entities, ent);
+ }
+ if (get_entity_link(ent) == NULL) {
+ /* The Sel operation can never return a pointer to a
+ * callable method.  It is therefore not executable and
+ * unreachable. */
+ exchange(node, new_Bad());
+ } else {
+ entity ** arr = get_entity_link(ent);
+ if (ARR_LEN(arr) == 1 && arr[0] != NULL) {
+ /* The Sel operation can only ever yield a pointer to one
+ * internal method.  We can therefore replace it by a
+ * Const or SymConst operation. */
+ if (get_entity_visibility(arr[0]) == external_allocated) {
+ exchange(node, new_SymConst((type_or_id_p) get_entity_ld_ident(arr[0]), linkage_ptr_info));
+ } else {
+ exchange(node, new_Const(mode_p, tarval_p_from_entity(arr[0])));
+ }
+ }
+ }
+ }
+ }
+}
+
+
+/* Initialize the data structure.  In addition all SymConst
+ * operations that reference internal methods are replaced by
+ * Const operations. */
+static void sel_methods_init(void) {
+ int i;
+ pmap * ldname_map = pmap_create();
+ assert(entities == NULL);
+ entities = eset_create();
+ for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ entity * ent = get_irg_ent(get_irp_irg(i));
+ /* Only externally visible methods can be called via SymConst
+ * at all. */
+ if (get_entity_visibility(ent) != local) {
+ pmap_insert(ldname_map, (void *) get_entity_ld_ident(ent), ent);
+ }
+ }
+ all_irg_walk((irg_walk_func) sel_methods_walker, NULL, ldname_map);
+ pmap_destroy(ldname_map);
+}
+
+
+/* Free the data structure: releases each entity's cached method
+ * array (stored in entity.link) and the global "entities" set. */
+static void sel_methods_dispose(void) {
+ entity * ent;
+ assert(entities);
+ for (ent = eset_first(entities); ent; ent = eset_next(entities)) {
+ entity ** arr = get_entity_link(ent);
+ if (arr) {
+ DEL_ARR_F(arr);
+ }
+ set_entity_link(ent, NULL);
+ }
+ eset_destroy(entities);
+ entities = NULL;
+}
+
+
+/* Return the set of all methods that could be returned at this
+ * Sel node.  The list contains no duplicate entries. */
+static entity ** get_Sel_arr(ir_node * sel) {
+ static entity ** NULL_ARRAY = NULL;
+ entity * ent;
+ entity ** arr;
+ assert(sel && get_irn_op(sel) == op_Sel);
+ ent = get_Sel_entity(sel);
+ assert(is_method_type(get_entity_type(ent))); /* what else? */
+ arr = get_entity_link(ent);
+ if (arr) {
+ return arr;
+ } else {
+ /* "NULL" indicates that no implementation exists.  This can
+ * happen for polymorphic (abstract) methods. */
+ if (!NULL_ARRAY) {
+ NULL_ARRAY = NEW_ARR_F(entity *, 0);
+ }
+ return NULL_ARRAY;
+ }
+}
+
+
+/* Number of methods a Sel node may yield (see get_Sel_arr). */
+static int get_Sel_n_methods(ir_node * sel) {
+ return ARR_LEN(get_Sel_arr(sel));
+}
+
+
+/* The method at index "pos" of a Sel node's candidate set; may be
+ * NULL for the unknown (external) method. */
+static entity * get_Sel_method(ir_node * sel, int pos) {
+ entity ** arr = get_Sel_arr(sel);
+ assert(pos >= 0 && pos < ARR_LEN(arr));
+ return arr[pos];
+}
+
+
+
+/* --- callee analysis ------------------------------------------------------ */
+
+
+static void callee_ana_node(ir_node * node, eset * methods);
+
+
+/* Analyze a mode_T predecessor of a Proj: follow Tuples/Ids to find
+ * the value at projection index "n"; anything else is treated as a
+ * free (unknown) method.  Uses the node link as a visit MARK to
+ * stop cycles. */
+static void callee_ana_proj(ir_node * node, long n, eset * methods) {
+ assert(get_irn_mode(node) == mode_T);
+ if (get_irn_link(node) == MARK) {
+ /* already visited */
+ return;
+ }
+ set_irn_link(node, MARK);
+
+ switch (get_irn_opcode(node)) {
+ case iro_Proj: {
+ /* proj_proj: in a "meaningful" graph this is followed by an
+ * op_Tuple or by a node that returns a "free method". */
+ ir_node * pred = get_Proj_pred(node);
+ if (get_irn_link(pred) != MARK) {
+ if (get_irn_op(pred) == op_Tuple) {
+ callee_ana_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, methods);
+ } else {
+ eset_insert(methods, MARK); /* free method -> unknown */
+ }
+ }
+ break;
+ }
+
+ case iro_Tuple:
+ callee_ana_node(get_Tuple_pred(node, n), methods);
+ break;
+
+ case iro_Id:
+ callee_ana_proj(get_Id_pred(node), n, methods);
+ break;
+
+ default:
+ eset_insert(methods, MARK); /* free method -> unknown */
+ break;
+ }
+
+ set_irn_link(node, NULL);
+}
+
+
+/* Collect into "methods" all entities the pointer value "node" may
+ * denote.  MARK in the set represents the unknown (free/external)
+ * method.  Uses the node link as a visit MARK to stop recursion. */
+static void callee_ana_node(ir_node * node, eset * methods) {
+ int i;
+
+ assert(get_irn_mode(node) == mode_p);
+ /* prevent recursion */
+ if (get_irn_link(node) == MARK) {
+ /* already visited */
+ return;
+ }
+ set_irn_link(node, MARK);
+
+ switch (get_irn_opcode(node)) {
+ case iro_SymConst:
+ /* external method (because of fix_symconst!) */
+ eset_insert(methods, MARK); /* free method -> unknown */
+ break;
+
+ case iro_Const: {
+ /* internal method */
+ entity * ent = get_Const_tarval(node)->u.p.ent;
+ assert(ent && is_method_type(get_entity_type(ent)));
+ if (get_entity_visibility(ent) != external_allocated) {
+ assert(get_entity_irg(ent));
+ eset_insert(methods, ent);
+ } else {
+ eset_insert(methods, MARK); /* free method -> unknown */
+ }
+ break;
+ }
+
+ case iro_Sel:
+ /* polymorphic method */
+ for (i = get_Sel_n_methods(node) - 1; i >= 0; --i) {
+ entity * ent = get_Sel_method(node, i);
+ if (ent) {
+ eset_insert(methods, ent);
+ } else {
+ eset_insert(methods, MARK);
+ }
+ }
+ break;
+
+ case iro_Bad:
+ /* nothing */
+ break;
+
+ case iro_Phi: /* union of all predecessors */
+ for (i = get_Phi_n_preds(node) - 1; i >= 0; --i) {
+ callee_ana_node(get_Phi_pred(node, i), methods);
+ }
+ break;
+
+ case iro_Id:
+ callee_ana_node(get_Id_pred(node), methods);
+ break;
+
+ case iro_Proj:
+ callee_ana_proj(get_Proj_pred(node), get_Proj_proj(node), methods);
+ break;
+
+ case iro_Add:
+ case iro_Sub:
+ case iro_Conv:
+ /* computed pointer -> external/unknown */
+ eset_insert(methods, MARK); /* free method -> unknown */
+ break;
+
+ default:
+ assert(0 && "invalid opcode or opcode not implemented");
+ break;
+ }
+
+ set_irn_link(node, NULL);
+}
+
+
+/* Walker: for each Call node, compute the set of possible callees
+ * and attach it via set_Call_callee_arr.  A leading NULL entry in
+ * the array stands for the unknown method. */
+static void callee_walker(ir_node * call, void * env) {
+ if (get_irn_op(call) == op_Call) {
+ eset * methods = eset_create();
+ entity * ent;
+ entity ** arr = NEW_ARR_F(entity *, 0);
+ callee_ana_node(skip_nop(get_Call_ptr(call)), methods);
+ if (eset_contains(methods, MARK)) { /* unknown method */
+ ARR_APP1(entity *, arr, NULL);
+ }
+ for (ent = eset_first(methods); ent; ent = eset_next(methods)) {
+ if (ent != MARK) {
+ ARR_APP1(entity *, arr, ent);
+ }
+ }
+ if (ARR_LEN(arr) == 0) {
+ /* This can happen when the predecessor was, for example, a
+ * Sel operation that could return no methods at all.  We
+ * replace the Call operation by a Bad operation as well.
+ * The linkage has to be reestablished! */
+ exchange(call, new_Bad());
+ } else {
+ set_Call_callee_arr(call, ARR_LEN(arr), arr);
+ }
+ DEL_ARR_F(arr);
+ eset_destroy(methods);
+ }
+}
+
+
+/* Determine for every Call operation the set of callable methods
+ * and store the result at the Call operation (see
+ * "set_Call_callee").  "sel_methods" is required for this and must
+ * already be built. */
+static void callee_ana(void) {
+ int i;
+ /* Analyze all graphs. */
+ for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ irg_walk_graph(get_irp_irg(i), callee_walker, NULL, NULL);
+ }
+}
+
+
+
+/* --- free method analysis ------------------------------------------------- */
+
+
+static void free_mark(ir_node * node, eset * set);
+
+/* Mark methods reachable through projection index "n" of a mode_T
+ * node.  Uses the node link as a visit MARK to stop cycles. */
+static void free_mark_proj(ir_node * node, long n, eset * set) {
+ assert(get_irn_mode(node) == mode_T);
+ if (get_irn_link(node) == MARK) {
+ /* already visited */
+ return;
+ }
+ set_irn_link(node, MARK);
+ switch (get_irn_opcode(node)) {
+ case iro_Proj: {
+ /* proj_proj: in a "meaningful" graph this is followed by an
+ * op_Tuple or by a node that is handled in
+ * "free_ana_walker". */
+ ir_node * pred = get_Proj_pred(node);
+ if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) {
+ free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
+ } else {
+ /* nothing: handled in "free_ana_walker". */
+ }
+ break;
+ }
+
+ case iro_Tuple:
+ free_mark(get_Tuple_pred(node, n), set);
+ break;
+
+ case iro_Id:
+ free_mark_proj(get_Id_pred(node), n, set);
+ break;
+
+ case iro_Start:
+ case iro_Alloc:
+ case iro_Load:
+ /* nothing: these operations are handled by
+ * "free_ana_walker" itself. */
+ break;
+
+ default:
+ assert(0 && "unexpected opcode or opcode not implemented");
+ break;
+ }
+ set_irn_link(node, NULL);
+}
+
+
+/* Mark all methods that the pointer value "node" may denote as
+ * free, i.e. insert them into "set".  Uses the node link as a
+ * visit MARK to stop cycles. */
+static void free_mark(ir_node * node, eset * set) {
+ int i;
+ assert(get_irn_mode(node) == mode_p);
+ if (get_irn_link(node) == MARK) {
+ return; /* already visited */
+ }
+ set_irn_link(node, MARK);
+ switch (get_irn_opcode(node)) {
+ case iro_Sel: {
+ entity * ent = get_Sel_entity(node);
+ if (is_method_type(get_entity_type(ent))) {
+ for (i = get_Sel_n_methods(node) - 1; i >= 0; --i) {
+ eset_insert(set, get_Sel_method(node, i));
+ }
+ }
+ break;
+ }
+ case iro_SymConst:
+ /* nothing: SymConst points to extern method */
+ break;
+ case iro_Const: {
+ tarval * val = get_Const_tarval(node);
+ entity * ent = val->u.p.ent;
+ if (ent != NULL && is_method_type(get_entity_type(ent))) {
+ eset_insert(set, ent);
+ }
+ break;
+ }
+ case iro_Phi:
+ for (i = get_Phi_n_preds(node) - 1; i >= 0; --i) {
+ free_mark(get_Phi_pred(node, i), set);
+ }
+ break;
+ case iro_Id:
+ free_mark(get_Id_pred(node), set);
+ break;
+ case iro_Proj:
+ free_mark_proj(get_Proj_pred(node), get_Proj_proj(node), set);
+ break;
+ default:
+ /* nothing: handled below (in free_ana_walker)! */
+ break;
+ }
+ set_irn_link(node, NULL);
+}
+
+
+/* Walker: any node that takes a method pointer as input (except a
+ * Call's function-pointer input) "leaks" that method, making it
+ * free.  Such methods are collected into "set". */
+static void free_ana_walker(ir_node * node, eset * set) {
+ int i;
+ if (get_irn_link(node) == MARK) {
+ /* already visited inside a cycle. */
+ return;
+ }
+ switch (get_irn_opcode(node)) {
+ /* special nodes */
+ case iro_Sel:
+ case iro_SymConst:
+ case iro_Const:
+ case iro_Phi:
+ case iro_Id:
+ case iro_Proj:
+ case iro_Tuple:
+ /* nothing */
+ break;
+ /* Special case: the function-pointer input of a Call is of
+ * course not a leak. */
+ case iro_Call:
+ set_irn_link(node, MARK);
+ for (i = get_Call_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = get_Call_param(node, i);
+ if (get_irn_mode(pred) == mode_p) {
+ free_mark(pred, set);
+ }
+ }
+ break;
+ /* other nodes: We assume all other nodes are leaks, until
+ * someone implements the contrary. */
+ default:
+ set_irn_link(node, MARK);
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = get_irn_n(node, i);
+ if (get_irn_mode(pred) == mode_p) {
+ free_mark(pred, set);
+ }
+ }
+ break;
+ }
+ set_irn_link(node, NULL);
+}
+
+
+/* The data structures for sel methods (sel_methods) must be built
+ * before calling "get_free_methods".  The (internal) SymConst
+ * operations must have been converted into suitable Const
+ * operations, i.e. SymConst operations always reference a truly
+ * external method.  Returns a flexible array (DEL_ARR_F to free)
+ * of all free methods. */
+static entity ** get_free_methods(void) {
+ eset * set = eset_create();
+ int i;
+ entity ** arr = NEW_ARR_F(entity *, 0);
+ entity * ent;
+ for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ ir_graph * irg = get_irp_irg(i);
+ entity * ent = get_irg_ent(irg);
+ /* insert "external visible" methods. */
+ if (get_entity_visibility(ent) != local) {
+ eset_insert(set, ent);
+ }
+ irg_walk_graph(irg, NULL, (irg_walk_func) free_ana_walker, set);
+ }
+ /* The main program is free as well, even if it is not "external
+ * visible". */
+ eset_insert(set, get_irg_ent(get_irp_main_irg()));
+ for (ent = eset_first(set); ent; ent = eset_next(set)) {
+ ARR_APP1(entity *, arr, ent);
+ }
+ eset_destroy(set);
+
+ return arr;
+}
+
+/* Main entry point of the call graph analysis: computes the set of
+ * free methods (returned via *length / *free_methods) and annotates
+ * every Call operation with the set of its possible callees.  The
+ * caller owns the malloc'ed *free_methods array.
+ * NOTE(review): the malloc result is not checked for NULL — confirm
+ * the project's OOM policy. */
void cgana(int *length, entity ***free_methods) {
+ entity ** free_meths;
+ int i;
+
+ sel_methods_init();
+ free_meths = get_free_methods();
+ callee_ana();
+ sel_methods_dispose();
+
+ /* Convert the flexible array to an array that can be handled
+ by standard C. */
+ *length = ARR_LEN(free_meths);
+ *free_methods = (entity **)malloc(sizeof(entity *) * (*length));
+ for (i = 0; i < (*length); i++) (*free_methods)[i] = free_meths[i];
+ DEL_ARR_F(free_meths);
+}
#define USE_EXPLICIT_PHI_IN_STACK 1
/*
-/* If this is defined debuging aids are created, e.g. a field in
+ * If this is defined debuging aids are created, e.g. a field in
* ir_node uniquely numbering the nodes.
* #define DEBUG_libfirm 1
* This is now set by the configure script as an option.
INSTALL_HEADERS = irprog.h irgraph.h irnode.h irmode.h irop.h ircons.h \
irflag.h irvrfy.h irgwalk.h irgmod.h iropt.h irdump.h \
- irgopt.h old_fctnames.h
+ irgopt.h old_fctnames.h ircgcons.h ircgopt.h
SOURCES = $(INSTALL_HEADERS)
ircons.c irgmod.c irgraph_t.h irnode.c iropt.c irvrfy.c \
irgwalk.c irdump.c irgopt.c irnode_t.h iropt_t.h \
irmode.c irop.c irprog.c irflag.c irgraph.c irmode_t.h \
- irop_t.h irprog_t.h iropt_dbg.c
+ irop_t.h irprog_t.h iropt_dbg.c ircgcons.c ircgopt.c
include $(topdir)/MakeRules
set_Block_block_visited(res, 0);
res->attr.block.exc = exc_normal;
+ res->attr.block.in_cg = NULL;
irn_vrfy (res);
return res;
assert(is_method_type(type));
set_Call_type(res, type);
+ res->attr.call.callee_arr = NULL;
res = optimize (res);
irn_vrfy (res);
return res;
return current_ir_graph->bad;
}
+/* Return the graph's singleton Unknown node. */
+ir_node *
+new_rd_Unknown ()
+{
+ return current_ir_graph->unknown;
+}
+
+/* Construct a CallBegin node (mode_T) whose single input is the
+ * function pointer of "call". */
+ir_node *
+new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
+{
+ ir_node *in[1] = { get_Call_ptr(call) };
+ ir_node *res;
+ res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
+ res->attr.callbegin.irg = irg;
+ res->attr.callbegin.call = call;
+ res = optimize (res);
+ irn_vrfy (res);
+ return res;
+}
+
+/* Construct an EndReg node (mode_T, dynamic arity: -1/NULL). */
+ir_node *
+new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
+{
+ ir_node *res;
+
+ res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
+ res->attr.end.irg = irg;
+
+ irn_vrfy (res);
+ return res;
+}
+
+/* Construct an EndExcept node (mode_T, dynamic arity: -1/NULL). */
+ir_node *
+new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
+{
+ ir_node *res;
+
+ res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
+ res->attr.end.irg = irg;
+
+ irn_vrfy (res);
+ return res;
+}
+
+/* Construct a Break node (mode_X, no inputs). */
+inline ir_node *
+new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
+{
+ ir_node *res;
+ /* Arity 0: pass NULL instead of a zero-length array initializer,
+  * which is not valid ISO C (matches new_rd_EndReg/new_rd_EndExcept). */
+ res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
+ res = optimize (res);
+ irn_vrfy (res);
+ return res;
+}
+
+/* Construct a Filter node: the interprocedural counterpart of Proj,
+ * selecting projection "proj" of "arg" with result mode "mode". */
+ir_node *
+new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
+ long proj)
+{
+ ir_node *in[1] = {arg};
+ ir_node *res;
+ res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
+ res->attr.filter.proj = proj;
+ res->attr.filter.in_cg = NULL;
+
+ assert(res);
+ assert(get_Proj_pred(res));
+ assert(get_nodes_Block(get_Proj_pred(res)));
+
+ res = optimize (res);
+
+ irn_vrfy (res);
+ return res;
+
+}
+
ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
return new_rd_Block(NULL, irg, arity, in);
}
ir_node *new_r_Bad () {
return new_rd_Bad();
}
+/* Raw-interface wrappers: delegate to new_rd_* without debug info. */
+ir_node *new_r_Unknown () {
+ return new_rd_Unknown();
+}
+ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
+ return new_rd_CallBegin(NULL, irg, block, callee);
+}
+ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
+ return new_rd_EndReg(NULL, irg, block);
+}
+ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
+ return new_rd_EndExcept(NULL, irg, block);
+}
+ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
+ return new_rd_Break(NULL, irg, block);
+}
+ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
+ ir_mode *mode, long proj) {
+ return new_rd_Filter(NULL, irg, block, arg, mode, proj);
+}
+
/** ********************/
/** public interfaces */
}
}
-inline ir_node *
+void
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
if (!frag_arr[pos]) frag_arr[pos] = val;
if (frag_arr[current_ir_graph->n_loc - 1])
inline ir_node *
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
ir_node *res;
- ir_node **rem;
ir_node **frag_arr;
assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
return current_ir_graph->bad;
}
+/* Debug-interface constructors: build the node in the current block
+ * of current_ir_graph, delegating to the raw new_rd_* variants. */
+ir_node *
+new_d_Unknown (void)
+{
+ return current_ir_graph->unknown;
+}
+
+ir_node *
+new_d_CallBegin (dbg_info *db, ir_node *call)
+{
+ ir_node *res;
+ res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
+ return res;
+}
+
+ir_node *
+new_d_EndReg (dbg_info *db)
+{
+ ir_node *res;
+ res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
+ return res;
+}
+
+ir_node *
+new_d_EndExcept (dbg_info *db)
+{
+ ir_node *res;
+ res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
+ return res;
+}
+
+ir_node *
+new_d_Break (dbg_info *db)
+{
+ return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
+}
+
+ir_node *
+new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
+{
+ return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
+ arg, mode, proj);
+}
+
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of ?? interface, except new_Block. */
current_ir_graph->current_block = res;
res->attr.block.matured = 0;
res->attr.block.exc = exc_normal;
+ res->attr.block.in_cg = NULL;
set_Block_block_visited(res, 0);
/* Create and initialize array for Phi-node construction. */
ir_node *new_Bad (void) {
return new_d_Bad();
}
+/* Block-oriented interface without debug support: delegate to the
+ * new_d_* constructors with NULL debug info. */
+ir_node *new_Unknown(void) {
+ return new_d_Unknown();
+}
+ir_node *new_CallBegin (ir_node *callee) {
+ return new_d_CallBegin(NULL, callee);
+}
+ir_node *new_EndReg (void) {
+ return new_d_EndReg(NULL);
+}
+ir_node *new_EndExcept (void) {
+ return new_d_EndExcept(NULL);
+}
+ir_node *new_Break (void) {
+ return new_d_Break(NULL);
+}
+ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
+ return new_d_Filter(NULL, arg, mode, proj);
+}
ir_node *new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block,
ir_node *val, ir_mode *mode);
ir_node *new_rd_Bad ();
+ir_node *new_rd_Unknown();
+ir_node *new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *callee);
+ir_node *new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block);
+ir_node *new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block);
+ir_node *new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block);
+ir_node *new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
+ ir_mode *mode, long proj);
/***************************************************************************/
/* The raw interface without debug support */
ir_node *new_r_Id (ir_graph *irg, ir_node *block,
ir_node *val, ir_mode *mode);
ir_node *new_r_Bad ();
+ir_node *new_r_Unknown();
+ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee);
+ir_node *new_r_EndReg (ir_graph *irg, ir_node *block);
+ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block);
+ir_node *new_r_Break (ir_graph *irg, ir_node *block);
+ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
+ ir_mode *mode, long proj);
/*************************************************************************/
/* The block oriented interface */
ir_node *new_d_Tuple (dbg_info* db, int arity, ir_node **in);
ir_node *new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode);
ir_node *new_d_Bad (void);
+ir_node *new_d_Unknown(void);
+ir_node *new_d_CallBegin(dbg_info *db, ir_node *callee);
+ir_node *new_d_EndReg (dbg_info *db);
+ir_node *new_d_EndExcept(dbg_info *db);
+ir_node *new_d_Break (dbg_info *db);
+ir_node *new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj);
/*************************************************************************/
/* The block oriented interface without debug support */
ir_node *new_Tuple (int arity, ir_node **in);
ir_node *new_Id (ir_node *val, ir_mode *mode);
ir_node *new_Bad (void);
+ir_node *new_Unknown(void);
+ir_node *new_CallBegin(ir_node *callee);
+ir_node *new_EndReg (void);
+ir_node *new_EndExcept(void);
+ir_node *new_Break (void);
+ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj);
/***********************************************************************/
/* The comfortable interface. */
# include "exc.h"
+# include "pmap.h"
+
/* Attributes of nodes */
#define DEFAULT_NODE_ATTR ""
#define DEFAULT_TYPE_ATTRIBUTE ""
#if DEBUG_libfirm && NODEID_AS_LABEL
-#define PRINT_NODEID(X) fprintf(F, "%d", get_irn_node_nr(X))
+#define PRINT_NODEID(X) fprintf(F, "%ld", get_irn_node_nr(X))
#else
#define PRINT_NODEID(X) fprintf(F, "%p", X)
#endif
inline void
dump_node_opcode (ir_node *n)
{
+ assert(n && n->op);
/* Const */
if (n->op->code == iro_Const) {
else
xfprintf (F, "size");
}
+
+ /* Filter */
+ } else if (n->op->code == iro_Filter && !interprocedural_view) {
+ fprintf(F, "Proj'");
+
/* all others */
} else {
xfprintf (F, "%I", get_irn_opident(n));
case iro_Const:
case iro_Id:
case iro_Proj:
+ case iro_Filter:
case iro_Conv:
case iro_Tuple:
case iro_Add:
xfprintf (F, "%ld", n->attr.proj);
}
break;
+ case iro_Filter:
+ xfprintf (F, "%ld", n->attr.filter.proj);
+ break;
case iro_Sel: {
assert(get_kind(get_Sel_entity(n)) == k_entity);
xfprintf (F, "%I", get_entity_ident(get_Sel_entity(n)));
{
switch (n->op->code) {
case iro_Start:
+ case iro_EndReg:
+ /* fall through */
+ case iro_EndExcept:
+ /* fall through */
case iro_End:
xfprintf (F, "color: blue");
break;
break;
case iro_Const:
case iro_Proj:
+ case iro_Filter:
case iro_Tuple:
xfprintf (F, "color: yellow");
break;
xfprintf (F, "\"%I\" color: blue ", get_irn_opident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
break;
+ case iro_EndReg:
+ /* fall through */
+ case iro_EndExcept:
+ /* fall through */
case iro_End:
xfprintf (F, "\"%I\" color: blue ", get_irn_opident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
}
xfprintf (F, DEFAULT_NODE_ATTR);
break;
+ case iro_Filter:
+ xfprintf (F, "\"%I%I %ld\"", get_irn_opident(n), get_irn_modeident(n), n->attr.filter.proj);
+ xfprintf (F, DEFAULT_NODE_ATTR);
+ break;
case iro_Conv:
xfprintf (F, "\"%I%I\"", get_irn_opident(n), get_irn_modeident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
xfprintf (F, "\"%I\"", get_irn_opident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
break;
+ case iro_Break:
+ xfprintf (F, "\"%I\"", get_irn_opident(n));
+ xfprintf (F, DEFAULT_NODE_ATTR);
+ break;
case iro_Cond:
xfprintf (F, "\"%I\"", get_irn_opident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
xfprintf (F, "\"%I\"", get_irn_opident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
break;
+ case iro_CallBegin:
+ xfprintf (F, "\"%I\"", get_irn_opident(n));
+ xfprintf (F, DEFAULT_NODE_ATTR);
+ break;
case iro_Return:
xfprintf (F, "\"%I\"", get_irn_opident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
xfprintf (F, "\"%I%I\" ", get_irn_opident(n), get_irn_modeident(n));
xfprintf (F, DEFAULT_NODE_ATTR);
break;
+ case iro_Unknown:
+ xfprintf (F, "\"%I%I\" ", get_irn_opident(n), get_irn_modeident(n));
+ xfprintf (F, DEFAULT_NODE_ATTR);
+ break;
default:
xfprintf (F, "\"%I%I\" ", get_irn_opident(n), get_irn_modeident(n));
}
xfprintf (F, MEM_EDGE_ATTR);
}
break;
+ case iro_EndReg: break;
+ case iro_EndExcept: break;
case iro_Jmp: break;
+ case iro_Break: break;
case iro_Cond: break;
case iro_Return:
case iro_Raise:
case iro_Call:
if (to == 0) xfprintf (F, MEM_EDGE_ATTR);
break;
+ case iro_CallBegin: break;
case iro_Add: break;
case iro_Sub: break;
case iro_Minus: break;
break;
case iro_Tuple: break;
case iro_Proj:
+ case iro_Filter:
switch (get_irn_modecode(from)) {
case irm_X:
xfprintf (F, CF_EDGE_ATTR);
}
break;
case iro_Bad: break;
+ case iro_Unknown: break;
case iro_Id: break;
default:
}
/* dump edges to our inputs */
void
dump_ir_data_edges(ir_node *n) {
- int i, max;
+ int i, visited = get_irn_visited(n);
if ((get_irn_op(n) == op_End) && (!dump_keepalive))
return;
for (i = 0; i < get_irn_arity(n); i++) {
- assert(get_irn_n(n, i));
+ ir_node * pred = get_irn_n(n, i);
+ assert(pred);
+ if (interprocedural_view && get_irn_visited(pred) < visited) continue; /* pred not dumped */
fprintf (F, "edge: {sourcename: \"");
PRINT_NODEID(n);
fprintf (F, "\" targetname: \"");
- PRINT_NODEID(get_irn_n(n, i));
+ PRINT_NODEID(pred);
fprintf (F, "\"");
fprintf (F, " label: \"%d\" ", i);
print_edge_vcgattr(n, i);
/* routines to dump a graph, blocks as conventional nodes. */
/************************************************************************/
+/* True iff the node's op is not pinned and the current graph allows
+ * floating nodes. */
+int node_floats(ir_node *n) {
+
+ return ((get_op_pinned(get_irn_op(n)) == floats) &&
+ (get_irg_pinned(current_ir_graph) == floats));
+}
+
void
dump_whole_node (ir_node *n, void* env) {
dump_node(n);
vcg_open (irg, "");
/* walk over the graph */
- irg_walk(irg->end, dump_whole_node, NULL, NULL);
+ /* dump_whole_node must be called in post visiting predecessors */
+ irg_walk(irg->end, NULL, dump_whole_node, NULL);
/* dump the out edges in a separate walk */
if ((dump_out_edge_flag) && (get_irg_outs_state(irg) != no_outs)) {
/* the following routines dump the nodes as attached to the blocks. */
/***********************************************************************/
-int node_floats(ir_node *n) {
-
- return ((get_op_pinned(get_irn_op(n)) == floats) &&
- (get_irg_pinned(current_ir_graph) == floats));
-}
-
void
dump_ir_blocks_nodes (ir_node *n, void *env) {
ir_node *block = (ir_node *)env;
#else
xfprintf (F, "%I", block->op->name);
#endif
- if (exc_normal != get_Block_exc (block))
- fprintf (F, " (%s)", exc_to_string (get_Block_exc (block)));
+ if (exc_normal != get_Block_exc (block))
+ fprintf (F, " (%s)", exc_to_string (get_Block_exc (block)));
xfprintf(F, "\" status:clustered color:%s \n",
- get_Block_matured (block) ? "yellow" : "red");
+ get_Block_matured (block) ? "yellow" : "red");
/* dump the blocks edges */
dump_ir_data_edges(block);
/* Enables dumping of dominator information; the flag is read by the
 * vcg dump routines. */
void dump_dominator_information() {
  dump_dominator_information_flag = 1;
}
+
+
+/* Pre-walker: reset the node's link field.  collect_blocks_floats_cg
+ * later uses the link to chain nodes onto their block. */
+static void clear_link(ir_node * node, void * env) {
+ set_irn_link(node, NULL);
+}
+
+
+/* Returns true iff node is a Block (inverse of is_no_Block). */
+static inline bool is_Block(ir_node * node) {
+ return !is_no_Block(node);
+}
+
+
+/* Post-walker for the interprocedural dumpers.  Nodes without a useful
+ * block association (blocks themselves, floating nodes, Bad, Unknown)
+ * are collected per ir_graph in map (key: ir_graph, value: flexible
+ * array of nodes); all other nodes are chained onto their block's link
+ * field for later per-block dumping. */
+static void collect_blocks_floats_cg(ir_node * node, pmap * map) {
+ if (is_Block(node)
+ || node_floats(node)
+ || get_irn_op(node) == op_Bad
+ || get_irn_op(node) == op_Unknown) {
+ pmap_entry * entry = pmap_find(map, current_ir_graph);
+ if (entry) {
+ /* BUG FIX: ARR_APP1 may reallocate the array and reassigns its
+ * array argument.  Assigning through the cast
+ * "(ir_node **) entry->value" relies on the obsolete GCC
+ * cast-as-lvalue extension and is invalid ISO C; use a local
+ * variable and store the (possibly moved) pointer back. */
+ ir_node ** arr = (ir_node **) entry->value;
+ ARR_APP1(ir_node *, arr, node);
+ entry->value = arr;
+ } else {
+ ir_node ** arr = NEW_ARR_F(ir_node *, 1);
+ arr[0] = node;
+ pmap_insert(map, current_ir_graph, arr);
+ }
+ } else {
+ ir_node * block = get_nodes_Block(node);
+ set_irn_link(node, get_irn_link(block));
+ set_irn_link(block, node);
+ }
+}
+
+
+/* Dumps a Block as a clustered vcg subgraph: the block header, its data
+ * edges, and all nodes chained onto the block's link field by
+ * collect_blocks_floats_cg.  Output goes to the global dump file F;
+ * env is unused (walker signature). */
+static void dump_cg_ir_block(ir_node * node, void * env) {
+ assert(is_Block(node));
+ xfprintf(F, "graph: { title: \"");
+ PRINT_NODEID(node);
+ fprintf(F, "\" label: \"");
+#ifdef DEBUG_libfirm
+ xfprintf (F, "%ld", get_irn_node_nr(node));
+#else
+ xfprintf (F, "%I", node->op->name);
+#endif
+ /* mark blocks with a special exception-handling role */
+ if (exc_normal != get_Block_exc(node)) {
+ fprintf (F, " (%s)", exc_to_string (get_Block_exc(node)));
+ }
+
+ /* immature blocks are colored red */
+ xfprintf(F, "\" status:clustered color:%s \n",
+ get_Block_matured(node) ? "yellow" : "red");
+
+ /* dump the blocks edges */
+ dump_ir_data_edges(node);
+
+ /* dump the nodes that go into the block */
+ for (node = get_irn_link(node); node; node = get_irn_link(node)) {
+ dump_node(node);
+ dump_ir_data_edges(node);
+ }
+
+ /* Close the vcg information for the block */
+ xfprintf(F, "}\n\n");
+}
+
+
+/* dump interprocedural block graph (blocks as clusters) with surrounding methods */
+/* Dumps the interprocedural graph reachable from irg, one clustered
+ * subgraph per ir_graph, with blocks nested as sub-clusters (analogous
+ * to dump_ir_block_graph). */
+void dump_cg_block_graph(ir_graph * irg) {
+ pmap * map = pmap_create();
+ pmap_entry * entry;
+ vcg_open(irg, "");
+
+ /* group all reachable nodes by ir_graph; in-block nodes are chained
+ * onto their block's link field */
+ irg_walk_graph(irg, clear_link, (irg_walk_func) collect_blocks_floats_cg, map);
+ for (entry = pmap_first(map); entry; entry = pmap_next(map)) {
+ ir_node ** arr = entry->value;
+ int i;
+
+ /* one cluster per method, labeled with the entity name */
+ xfprintf(F, "graph: { title: \"%I\" label: \"%I\" status:clustered color:white \n",
+ get_entity_ident(get_irg_ent(entry->key)),
+ get_entity_ident(get_irg_ent(entry->key)));
+
+ for (i = ARR_LEN(arr) - 1; i >= 0; --i) {
+ ir_node * node = arr[i];
+ if (is_Block(node)) {
+ dump_cg_ir_block(node, NULL);
+ } else {
+ /* floating / Bad / Unknown nodes are dumped directly */
+ dump_node(node);
+ dump_ir_data_edges(node);
+ }
+ }
+
+ DEL_ARR_F(arr);
+
+ /* Close the vcg information for the irg */
+ xfprintf(F, "}\n\n");
+ }
+
+ pmap_destroy(map);
+
+ vcg_close();
+}
+
+
+/* dump interprocedural graph (blocks as plain nodes) with surrounding methods */
+/* Dumps the interprocedural graph reachable from irg, one clustered
+ * subgraph per ir_graph, with blocks as conventional nodes (analogous
+ * to dump_ir_graph). */
+void dump_cg_graph(ir_graph * irg) {
+ pmap * map = pmap_create();
+ pmap_entry * entry;
+ vcg_open(irg, "");
+
+ /* group all reachable nodes by ir_graph; in-block nodes are chained
+ * onto their block's link field */
+ irg_walk_graph(irg, clear_link, (irg_walk_func) collect_blocks_floats_cg, map);
+ for (entry = pmap_first(map); entry; entry = pmap_next(map)) {
+ ir_node ** arr = entry->value;
+ int i;
+ ident * irg_ident = get_entity_ident(get_irg_ent(entry->key));
+
+ xfprintf(F, "graph: { title: \"%I\" label: \"%I\" status:clustered color:white \n",
+ irg_ident, irg_ident);
+
+ for (i = ARR_LEN(arr) - 1; i >= 0; --i) {
+ ir_node * node = arr[i];
+ dump_node(node);
+ dump_ir_data_edges(node);
+ if (is_Block(node)) {
+ /* dump the nodes chained onto this block; reusing "node" as
+ * the loop variable is safe because arr[i] is re-read in the
+ * next outer iteration */
+ for (node = get_irn_link(node); node; node = get_irn_link(node)) {
+ dump_node(node);
+ dump_ir_block_edge(node);
+ dump_ir_data_edges(node);
+ }
+ }
+ }
+
+ DEL_ARR_F(arr);
+
+ /* Close the vcg information for the irg */
+ xfprintf(F, "}\n\n");
+ }
+
+ pmap_destroy(map);
+
+ vcg_close();
+}
* representation. Some use the original format,
* but most generate an extended format that is only read by some special
* versions of xvcg or by the comercialized version now calles aiSee.
- * A test version of aiSee is available at http://www.absint.de/aisee/download/index.htm.
+ * A test version of aiSee is available at
+ * http://www.absint.de/aisee/download/index.htm.
*
* Most routines use the name of the passed entity as the name of the
* file dumped to.
*/
void dump_ir_block_graph_w_types (ir_graph *irg);
+
+
+/****m* irdump/dump_cg_graph
+ *
+ * NAME
+ * dump_cg_graph
+ * SYNOPSIS
+ * void dump_cg_graph (ir_graph *irg);
+ * FUNCTION
+ *   Dumps an interprocedural firm graph as dump_ir_graph does.
+ * INPUTS
+ * irg: The firm graph to be dumped.
+ * RESULT
+ * A file containing the firm graph in vcg format.
+ * SEE ALSO
+ ***
+ */
+void dump_cg_graph(ir_graph * irg);
+
+/****m* irdump/dump_cg_block_graph
+ *
+ * NAME
+ * dump_cg_block_graph
+ * SYNOPSIS
+ * void dump_cg_block_graph (ir_graph *irg);
+ * FUNCTION
+ *   Dumps an interprocedural firm graph as dump_ir_block_graph does.
+ * INPUTS
+ * irg: The firm graph to be dumped.
+ * RESULT
+ * A file containing the firm graph in vcg format.
+ * SEE ALSO
+ ***
+ */
+void dump_cg_block_graph(ir_graph * irg);
+
/****m* irdump/dump_all_ir_graphs
*
* NAME
Spare the Bad predecessors of Phi and Block nodes. */
void
copy_preds (ir_node *n, void *env) {
- ir_node *nn, *block, *on;
+ ir_node *nn, *block;
int i, j;
nn = get_new_node(n);
copy_preds(get_irg_bad(current_ir_graph), NULL);
}
set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
+ if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
+ copy_node(get_irg_unknown(current_ir_graph), NULL);
+ copy_preds(get_irg_unknown(current_ir_graph), NULL);
+ }
+ set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
}
/* Copies all reachable nodes to a new obstack. Removes bad inputs
ir_node *ret, *phi;
ir_node *cf_op, *bl;
int arity, n_ret, n_exc, n_res, i, j, rem_opt;
- type *called_frame, *caller_frame;
+ type *called_frame;
if (!get_optimize() || !get_opt_inline()) return;
/** Turn off optimizations, this can cause problems when allocating new nodes. **/
places all floating nodes reachable from its argument through floating
nodes and adds all beginnings at pinned nodes to the worklist. */
inline void place_early () {
- int i;
- bool del_me;
-
assert(worklist);
inc_irg_visited(current_ir_graph);
}
static void collect_nodes(ir_node *n, void *env) {
- int i;
if (is_no_Block(n)) {
ir_node *b = get_nodes_Block(n);
ir_graph *current_ir_graph;
+bool interprocedural_view = false;
+
#if USE_EXPLICIT_PHI_IN_STACK
/* really defined in ircons.c */
typedef struct Phi_in_stack Phi_in_stack;
res->start_block = new_immBlock ();
res->start = new_Start ();
res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
+ res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL);
/* Proj results of start node */
projX = new_Proj (res->start, mode_X, pns_initial_exec);
res->end = new_End ();
mature_block(get_cur_block());
res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
+ res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL);
res->start = new_Start ();
/* Proj results of start node */
projX = new_Proj (res->start, mode_X, pns_initial_exec);
irg->bad = node;
}
+/* Returns the one-and-only Unknown node of the graph.
+ * (Prefer new_Unknown(); see the header comment.) */
+ir_node *
+get_irg_unknown (ir_graph *irg)
+{
+ return irg->unknown;
+}
+
+/* Replaces the graph's Unknown node (used e.g. after copying the graph
+ * in dead node elimination). */
+void
+set_irg_unknown (ir_graph *irg, ir_node *node)
+{
+ irg->unknown = node;
+}
+
ir_node *
get_irg_current_block (ir_graph *irg)
{
irg->pinned = p;
}
+/* Maximum "visited" counter over all ir_graphs; maintained by
+ * inc_irg_visited().  BUG FIX: was declared int although irg->visited
+ * and get_max_irg_visited() use unsigned long -- the narrower signed
+ * type risks truncation and signed/unsigned comparison surprises. */
+static unsigned long max_irg_visited = 0;
+
+
unsigned long
get_irg_visited (ir_graph *irg)
{
/* Advances the graph's visited counter and keeps the global maximum
 * over all graphs (max_irg_visited) up to date. */
void
inc_irg_visited (ir_graph *irg)
{
- irg->visited = irg->visited++;
+ /* the removed line was undefined behavior (unsequenced assignment
+ * and post-increment of the same object) */
+ if (++irg->visited > max_irg_visited) {
+ max_irg_visited = irg->visited;
+ }
+}
+
+/* Returns the maximum visited counter over all ir_graphs; used by the
+ * interprocedural walker to pick a fresh visited value. */
+unsigned long
+get_max_irg_visited(void)
+{
+ return max_irg_visited;
}
unsigned long
/* Advances the graph's block_visited counter (used by the block walker). */
void
inc_irg_block_visited (ir_graph *irg)
{
- irg->block_visited = irg->block_visited++;
+ /* the removed line was undefined behavior (unsequenced) */
+ ++irg->block_visited;
}
optimizations. */
extern ir_graph *current_ir_graph;
+/* This flag indicate the current view. The behaviour of some methods
+ * (get_irn_*, set_irn_*) is influenced by this flag. */
+extern bool interprocedural_view;
+
/* Create a new ir graph to built ir for a procedure.
ent is the entity representing this procedure, i.e., the type of the
entity must be of a method type. The constructor automatically sets the
ir_node *get_irg_bad (ir_graph *irg);
void set_irg_bad (ir_graph *irg, ir_node *node);
+/* Use new_Unknown() instead!! */
+ir_node *get_irg_unknown (ir_graph *irg);
+void set_irg_unknown (ir_graph *irg, ir_node *node);
+
ir_node *get_irg_current_block (ir_graph *irg);
void set_irg_current_block (ir_graph *irg, ir_node *node);
void inc_irg_visited(ir_graph *irg);
unsigned long get_irg_visited (ir_graph *irg);
void set_irg_visited(ir_graph *irg, unsigned long i);
+unsigned long get_max_irg_visited(void);
/* increments block_visited by one */
void inc_irg_block_visited(ir_graph *irg);
struct ir_node *args; /* methods arguments */
struct ir_node *bad; /* bad node of this ir_graph, the one and
only in this graph */
+ struct ir_node *unknown; /* unknown node of this ir_graph */
struct obstack *obst; /* obstack where all of the ir_nodes live */
struct ir_node *current_block; /* block for newly gen_*()-erated
ir_nodes */
# include "irnode.h"
# include "irgraph.h" /* visited flag */
# include "irprog.h"
+# include "irgwalk.h"
-void irg_walk_2(ir_node *node,
- void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env)
+# include "eset.h"
+
+
+/* walk over an interprocedural graph (callgraph). Visits only graphs in irg_set. */
+static void irg_walk_cg(ir_node * node, int visited, eset * irg_set, irg_walk_func pre, irg_walk_func post, void * env) {
+ int i;
+ ir_graph * rem = current_ir_graph;
+ ir_node * pred;
+
+ assert(node);
+ /* uses an explicit visited value instead of per-graph counters so one
+ * number is valid across all graphs of the walk */
+ if (get_irn_visited(node) >= visited) return;
+
+ set_irn_visited(node, visited);
+
+ /* interprocedural control-flow ops carry their ir_graph; switch
+ * current_ir_graph when crossing a method boundary */
+ pred = skip_Proj(node);
+ if (get_irn_op(pred) == op_CallBegin
+ || get_irn_op(pred) == op_EndReg
+ || get_irn_op(pred) == op_EndExcept) {
+ current_ir_graph = get_irn_irg(pred);
+ }
+
+ if (pre) pre(node, env);
+
+ if (is_no_Block(node)) irg_walk_cg(get_nodes_Block(node), visited, irg_set, pre, post, env);
+
+ if (get_irn_op(node) == op_Block) { /* block */
+ /* follow control-flow preds only into graphs contained in irg_set */
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * exec = get_irn_n(node, i);
+ ir_node * pred = skip_Proj(exec);
+ if ((get_irn_op(pred) != op_CallBegin
+ && get_irn_op(pred) != op_EndReg
+ && get_irn_op(pred) != op_EndExcept)
+ || eset_contains(irg_set, get_irn_irg(pred))) {
+ irg_walk_cg(exec, visited, irg_set, pre, post, env);
+ }
+ }
+ } else if (get_irn_op(node) == op_Filter) {
+ /* a Filter's i-th data pred lives in the graph of the block's i-th
+ * control-flow pred; only walk it if that graph is in irg_set */
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = get_irn_n(node, i);
+ if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
+ irg_walk_cg(pred, visited, irg_set, pre, post, env);
+ } else {
+ ir_node * exec;
+ exec = skip_Proj(get_Block_cfgpred(get_nodes_Block(node), i));
+ assert(get_irn_op(exec) == op_CallBegin
+ || get_irn_op(exec) == op_EndReg
+ || get_irn_op(exec) == op_EndExcept);
+ if (eset_contains(irg_set, get_irn_irg(exec))) {
+ current_ir_graph = get_irn_irg(exec);
+ irg_walk_cg(pred, visited, irg_set, pre, post, env);
+ current_ir_graph = rem;
+ }
+ }
+ }
+ } else {
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env);
+ }
+ }
+
+ if (post) post(node, env);
+
+ current_ir_graph = rem;
+}
+
+
+/* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
+static void collect_irgs(ir_node * node, eset * irg_set) {
+ if (get_irn_op(node) == op_Call) {
+ int i;
+ for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
+ entity * ent = get_Call_callee(node, i);
+ ir_graph * irg = ent ? get_entity_irg(ent) : NULL;
+ if (irg && !eset_contains(irg_set, irg)) {
+ eset_insert(irg_set, irg);
+ irg_walk_graph(irg, (irg_walk_func) collect_irgs, NULL, irg_set);
+ }
+ }
+ }
+}
+
+
+void irg_walk_2(ir_node *node, irg_walk_func pre, irg_walk_func post, void * env)
{
int i;
}
-void irg_walk(ir_node *node,
- void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env)
+/* Walks all nodes reachable from node.  In the interprocedural view the
+ * walk first collects the set of reachable graphs (intraprocedurally),
+ * then visits the combined graph with a single fresh visited value. */
+void irg_walk(ir_node *node, irg_walk_func pre, irg_walk_func post, void *env)
{
  assert(node);
- inc_irg_visited (current_ir_graph);
- irg_walk_2(node, pre, post, env);
+ if (interprocedural_view) {
+ eset * irg_set = eset_create();
+ int visited;
+ ir_graph * irg;
+ /* temporarily switch to intraprocedural view to find all graphs
+ * reachable via the callee analysis */
+ interprocedural_view = false;
+ eset_insert(irg_set, current_ir_graph);
+ irg_walk(node, (irg_walk_func) collect_irgs, NULL, irg_set);
+ interprocedural_view = true;
+ /* NOTE(review): "visited" is int but get_max_irg_visited() returns
+ * unsigned long -- possible truncation for very large counters */
+ visited = get_max_irg_visited() + 1;
+ irg_walk_cg(node, visited, irg_set, pre, post, env);
+ /* bring every participating graph's counter up to the value used */
+ for (irg = eset_first(irg_set); irg; irg = eset_next(irg_set)) {
+ set_irg_visited(irg, visited);
+ }
+ eset_destroy(irg_set);
+ } else {
+ inc_irg_visited(current_ir_graph);
+ irg_walk_2(node, pre, post, env);
+ }
  return;
}
+
+/* Walks all nodes of irg reachable from its End node, saving and
+ * restoring current_ir_graph around the walk. */
+void irg_walk_graph(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *env) {
+ ir_graph * saved = current_ir_graph;
+ current_ir_graph = irg;
+ irg_walk(get_irg_end(irg), pre, post, env);
+ current_ir_graph = saved;
+}
+
+
/***************************************************************************/
/* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
Sets current_ir_graph properly for each walk. Conserves current
current_ir_graph. */
-void all_irg_walk(void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env) {
+void all_irg_walk(irg_walk_func pre, irg_walk_func post, void *env) {
int i;
ir_graph *irg, *rem;
return skip_Proj(n);
}
-void irg_block_walk_2(ir_node *node, void (pre)(ir_node*, void*),
- void (post)(ir_node*, void*), void *env)
+void irg_block_walk_2(ir_node *node, irg_walk_func pre, irg_walk_func post, void *env)
{
int i;
pred = get_nodes_Block(pred);
if(get_irn_opcode(pred) == iro_Block) {
- /* recursion */
- irg_block_walk_2(pred, pre, post, env);
+ /* recursion */
+ irg_block_walk_2(pred, pre, post, env);
}
else {
assert(get_irn_opcode(pred) == iro_Bad);
/* walks only over Block nodes in the graph. Has it's own visited
flag, so that it can be interleaved with the other walker. */
-void irg_block_walk(ir_node *node,
- void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env)
+void irg_block_walk(ir_node *node, irg_walk_func pre, irg_walk_func post, void *env)
{
ir_node *block, *pred;
int i;
assert(node);
+ assert(!interprocedural_view); /* interprocedural_view not implemented, because it
+ * interleaves with irg_walk */
inc_irg_block_visited(current_ir_graph);
if (is_no_Block(node)) block = get_nodes_Block(node); else block = node;
assert(get_irn_opcode(block) == iro_Block);
return;
}
+
+
+/* Walks all blocks of irg reachable from its End node, saving and
+ * restoring current_ir_graph around the walk. */
+void irg_block_walk_graph(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *env) {
+ ir_graph * saved = current_ir_graph;
+ current_ir_graph = irg;
+ irg_block_walk(get_irg_end(irg), pre, post, env);
+ current_ir_graph = saved;
+}
# include "irnode.h"
+/* type of callback function for ir_graph walk */
+typedef void (* irg_walk_func)(ir_node *, void *);
+
/* Walks over the ir graph, starting at the node given as first argument.
Executes pre before visiting the predecessor of a node, post after.
irg_walk uses the visited flag in irg and the nodes to determine visited
flag. It marks the node as visited before executing pre.
The void* env can be used to pass status information between the
pre and post functions. */
-void irg_walk(ir_node *node,
- void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env);
+void irg_walk(ir_node *node, irg_walk_func pre, irg_walk_func post, void *env);
+
+/* Like "irg_walk", but walks over all reachable nodes in the ir
+ * graph, starting at the end operation. */
+void irg_walk_graph(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *env);
/* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
Sets current_ir_graph properly for each walk. Conserves current
current_ir_graph. */
-void all_irg_walk(void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env);
+void all_irg_walk(irg_walk_func pre, irg_walk_func post, void *env);
/* Walks only over Block nodes in the graph. Has its own visited
   flag, so that it can be interleaved with the other walker.
   If a non-block node is passed, starts at the block this node belongs to.
   If the End node is passed, also visits the kept-alive blocks. */
-void irg_block_walk(ir_node *node,
- void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
- void *env);
+void irg_block_walk(ir_node *node, irg_walk_func pre, irg_walk_func post, void *env);
+
+/* Like "irg_block_walk", but walks over all reachable blocks in the
+ * ir graph, starting at the end block. */
+void irg_block_walk_graph(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *env);
# endif /* _IRGWALK_H_ */
case iro_Jmp:
case iro_Return:
case iro_End:
+ case iro_Break:
+ case iro_EndReg:
+ case iro_EndExcept:
+ case iro_CallBegin:
break;
default:
XPF1 ("%I", get_irn_mode(np)->name);
/* returns the number of predecessors without the block predecessor. */
inline int
-get_irn_arity (ir_node *node)
-{
+/* In the interprocedural view Filter nodes and Blocks with
+ * interprocedural predecessors report the length of their in_cg
+ * array instead of the ordinary in array. */
+get_irn_arity (ir_node *node) {
  assert(node);
- return (ARR_LEN((node)->in)-1);
+ if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_irn_opcode(node) == iro_Filter) {
+ assert(node->attr.filter.in_cg);
+ return ARR_LEN(node->attr.filter.in_cg) - 1;
+ } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ return ARR_LEN(node->attr.block.in_cg) - 1;
+ }
+ /* else fall through */
+ }
+ return ARR_LEN(node->in) - 1;
}
/* Returns the array with ins. This array is shifted with respect to the
lists of operands as predecessors of Block or arguments of a Call are
consecutive. */
inline ir_node **
-get_irn_in (ir_node *node)
-{
- assert (node);
- return ((node)->in);
+/* In the interprocedural view Filter nodes and Blocks with
+ * interprocedural predecessors return their in_cg array; everything
+ * else returns the ordinary in array. */
+get_irn_in (ir_node *node) {
+ assert(node);
+ if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_irn_opcode(node) == iro_Filter) {
+ assert(node->attr.filter.in_cg);
+ return node->attr.filter.in_cg;
+ } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ return node->attr.block.in_cg;
+ }
+ /* else fall through */
+ }
+ return node->in;
}
inline void
+/* Replaces the predecessor array of the view currently selected by
+ * interprocedural_view.  The block predecessor at index 0 is kept;
+ * a new array is allocated on the graph's obstack if the arity
+ * changes. */
set_irn_in (ir_node *node, int arity, ir_node **in) {
+ ir_node *** arr;
  assert(node);
- if (arity != get_irn_arity(node)) {
- ir_node *block = node->in[0];
- node->in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (arity+1));
- node->in[0] = block;
+ if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_irn_opcode(node) == iro_Filter) {
+ assert(node->attr.filter.in_cg);
+ arr = &node->attr.filter.in_cg;
+ } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ arr = &node->attr.block.in_cg;
+ } else {
+ arr = &node->in;
+ }
+ } else {
+ arr = &node->in;
+ }
+ if (arity != ARR_LEN(*arr) - 1) {
+ ir_node * block = (*arr)[0];
+ *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
+ (*arr)[0] = block;
  }
- memcpy (&node->in[1], in, sizeof (ir_node *) * arity);
+ memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
}
/* to iterate through the predecessors without touching the array */
i < get_irn_arity.
If it is a block, the entry -1 is NULL. */
inline ir_node *
-get_irn_n (ir_node *node, int n)
-{
- ir_node *res;
- assert (node);
- assert ((get_irn_arity (node) > n) && (-1 <= n));
- res = skip_nop(node->in[n+1]);
- if (res != node->in[n+1]) node->in[n+1] = res;
- return res;
+/* Returns the n-th predecessor of the currently selected view, with Id
+ * (nop) chains skipped; the shortened pointer is written back into the
+ * array (path compression). */
+get_irn_n (ir_node *node, int n) {
+ assert(node && -1 <= n && n < get_irn_arity(node));
+ if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_irn_opcode(node) == iro_Filter) {
+ assert(node->attr.filter.in_cg);
+ return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
+ } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
+ }
+ /* else fall through */
+ }
+ return (node->in[n + 1] = skip_nop(node->in[n + 1]));
}
inline void
-set_irn_n (ir_node *node, int n, ir_node *in)
-{
- assert (node);
- assert (get_irn_arity (node) > n);
- node->in[n+1] = in;
+/* Sets the n-th predecessor in the currently selected view.  The block
+ * predecessor (n == -1) of a Filter is kept identical in BOTH views. */
+set_irn_n (ir_node *node, int n, ir_node *in) {
+ assert(node && -1 <= n && n < get_irn_arity(node));
+ if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
+ /* Change block pred in both views! */
+ node->in[n + 1] = in;
+ assert(node->attr.filter.in_cg);
+ node->attr.filter.in_cg[n + 1] = in;
+ return;
+ }
+ if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_irn_opcode(node) == iro_Filter) {
+ assert(node->attr.filter.in_cg);
+ node->attr.filter.in_cg[n + 1] = in;
+ return;
+ } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ node->attr.block.in_cg[n + 1] = in;
+ return;
+ }
+ /* else fall through */
+ }
+ node->in[n + 1] = in;
}
inline ir_mode *
return (block->attr.block.exc);
}
+/* Installs the interprocedural control-flow predecessor array of a
+ * Block.  Slot 0 (the block-pred position) stays NULL; a new array is
+ * obstack-allocated if none exists or the arity differs (the old array
+ * is not reclaimed -- obstack memory). */
+void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
+ assert(node->op == op_Block);
+ if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
+ node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
+ node->attr.block.in_cg[0] = NULL;
+ }
+ memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
+}
+
+/* Sets one interprocedural control-flow predecessor (bounds-checked). */
+void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
+ assert(node->op == op_Block && node->attr.block.in_cg && 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
+ node->attr.block.in_cg[pos + 1] = pred;
+}
+
+/* Returns the interprocedural predecessor array (skipping the block
+ * slot), or NULL if the block has none. */
+ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
+ assert(node->op == op_Block);
+ return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
+}
+
+/* Number of interprocedural predecessors; the array must exist. */
+int get_Block_cg_n_cfgpreds(ir_node * node) {
+ assert(node->op == op_Block && node->attr.block.in_cg);
+ return ARR_LEN(node->attr.block.in_cg) - 1;
+}
+
+/* Drops the interprocedural predecessor array; afterwards both views
+ * see the same predecessors.  (Array memory stays on the obstack.) */
+void remove_Block_cg_cfgpred_arr(ir_node * node) {
+ assert(node->op == op_Block);
+ node->attr.block.in_cg = NULL;
+}
+
inline int
get_End_n_keepalives(ir_node *end) {
assert (end->op == op_End);
ARR_APP1 (ir_node *, end->in, ka);
}
+/* Overwrites the keep-alive edge at position pos of the End node and
+ * returns the newly set keep-alive target. */
+inline ir_node *
+set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
+ assert (end->op == op_End);
+ set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
+ /* BUG FIX: function is declared to return ir_node* but had no return
+ * statement -- using the result was undefined behavior */
+ return ka;
+}
+
inline void
free_End (ir_node *end) {
/* DEL_ARR_F(end->in); GL @@@ tut nicht ! */
node->attr.call.cld_tp = type;
}
+/* Number of possible callees recorded by the callee analysis (cgana);
+ * the analysis must have been run (callee_arr set). */
+int get_Call_n_callees(ir_node * node) {
+ assert(node->op == op_Call && node->attr.call.callee_arr);
+ return ARR_LEN(node->attr.call.callee_arr);
+}
+
+/* Returns the pos-th possible callee of a Call, as recorded by the
+ * callee analysis (cgana).  Entries may be NULL (unknown callee). */
+entity * get_Call_callee(ir_node * node, int pos) {
+ assert(node->op == op_Call && node->attr.call.callee_arr);
+ /* bounds check, consistent with the other array accessors here */
+ assert(0 <= pos && pos < ARR_LEN(node->attr.call.callee_arr));
+ return node->attr.call.callee_arr[pos];
+}
+
+/* Stores the result of the callee analysis: n possible callee entities.
+ * Reuses the existing obstack array when the size matches. */
+void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
+ assert(node->op == op_Call);
+ if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
+ node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
+ }
+ memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
+}
+
+/* Invalidates the callee analysis result for this Call.  (Array memory
+ * stays on the obstack.) */
+void remove_Call_callee_arr(ir_node * node) {
+ assert(node->op == op_Call);
+ node->attr.call.callee_arr = NULL;
+}
+
/* For unary and binary arithmetic operations the access to the
operands can be factored out. Left is the first, right the
second arithmetic value as listed in tech report 0999-33.
inline ir_node *
/* Returns the single operand of a Proj -- or, via is_Proj(), of a
 * Filter in the intraprocedural view. */
get_Proj_pred (ir_node *node) {
- assert (node->op == op_Proj);
+ assert (is_Proj(node));
  return get_irn_n(node, 0);
}
inline void
/* Sets the single operand of a Proj (or of a Filter in the
 * intraprocedural view, via is_Proj()). */
set_Proj_pred (ir_node *node, ir_node *pred) {
- assert (node->op == op_Proj);
+ assert (is_Proj(node));
  set_irn_n(node, 0, pred);
}
inline long
/* Returns the projected result position; works for Proj nodes and --
 * in the intraprocedural view -- for Filter nodes, whose position is
 * stored in the filter attribute. */
get_Proj_proj (ir_node *node) {
- assert (node->op == op_Proj);
- return node->attr.proj;
+ assert (is_Proj(node));
+ if (get_irn_opcode(node) == iro_Proj) {
+ return node->attr.proj;
+ } else {
+ assert(get_irn_opcode(node) == iro_Filter);
+ return node->attr.filter.proj;
+ }
}
inline void
set_irn_n(node, 0, pred);
}
+
+/* Returns the intraprocedural predecessor of a Filter.  Accesses in[1]
+ * directly so the answer is view-independent. */
+inline ir_node *
+get_Filter_pred(ir_node *node) {
+ assert(node->op == op_Filter);
+ return node->in[1];
+}
+
+/* Returns the result position this Filter projects. */
+inline long
+get_Filter_proj(ir_node *node) {
+ assert(node->op == op_Filter);
+ return node->attr.filter.proj;
+}
+
+/* Installs the interprocedural predecessor array of a Filter.  Slot 0
+ * mirrors the block predecessor so both views agree on the block; a
+ * new array is obstack-allocated if none exists or the arity differs. */
+void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
+ assert(node->op == op_Filter);
+ if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
+ node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
+ node->attr.filter.in_cg[0] = node->in[0];
+ }
+ memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
+}
+
+/* Sets one interprocedural predecessor of a Filter (bounds-checked). */
+void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
+ assert(node->op == op_Filter && node->attr.filter.in_cg && 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
+ node->attr.filter.in_cg[pos + 1] = pred;
+}
+
+
+/* Returns the ir_graph a node belongs to.  Only CallBegin, EndReg and
+ * EndExcept carry this attribute (used for navigating interprocedural
+ * graphs); any other op is a usage error. */
+inline ir_graph *
+get_irn_irg(ir_node *node) {
+ if (get_irn_op(node) == op_CallBegin) {
+ return node->attr.callbegin.irg;
+ } else if (get_irn_op(node) == op_EndReg || get_irn_op(node) == op_EndExcept) {
+ return node->attr.end.irg;
+ } else {
+ assert(0 && "no irg attr");
+ /* BUG FIX: without this return, control reached the end of a
+ * non-void function when NDEBUG disables the assert -- UB */
+ return NULL;
+ }
+}
+
+
/******************************************************************/
/* Auxiliary routines */
/******************************************************************/
inline ir_node *
skip_Proj (ir_node *node) {
/* don't assert node !!! */
- if (node && (node->op == op_Proj)) {
+ if (node && is_Proj(node)) {
return get_Proj_pred(node);
} else {
return node;
return (get_irn_opcode(node) != iro_Block);
}
+/* Returns true for Proj nodes, and additionally for Filter nodes when
+ * NOT in the interprocedural view -- intraprocedurally a Filter acts
+ * exactly like a Proj. */
+inline int
+is_Proj (ir_node *node) {
+ assert(node);
+ return node->op == op_Proj
+ || (!interprocedural_view && node->op == op_Filter);
+}
+
/* Returns true if the operation manipulates control flow. */
int
is_cfop(ir_node *node) {
|| (get_irn_opcode(node) == iro_Load)
|| (get_irn_opcode(node) == iro_Store)
|| (get_irn_opcode(node) == iro_Alloc)
- || (get_irn_opcode(node) == iro_Bad));
+ || (get_irn_opcode(node) == iro_Bad)
+ || (get_irn_opcode(node) == iro_Unknown));
}
case iro_Alloc :
return get_irn_n(node, 0);
case iro_Bad :
+ case iro_Unknown:
return node;
default: ;
+ assert(0 && "not reached");
+ return NULL;
}
}
void set_Block_exc (ir_node*, exc_t);
exc_t get_Block_exc (ir_node*);
+/* Set and remove interprocedural predecessors. If the interprocedural
+ * predecessors are removed, the node has the same predecessors in
+ * both views. */
+void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in);
+void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred);
+ir_node ** get_Block_cg_cfgpred_arr(ir_node * node);
+int get_Block_cg_n_cfgpreds(ir_node * node);
+void remove_Block_cg_cfgpred_arr(ir_node * node);
+
inline int get_End_n_keepalives(ir_node *end);
inline ir_node *get_End_keepalive(ir_node *end, int pos);
inline void add_End_keepalive (ir_node *end, ir_node *ka);
+inline ir_node *set_End_keepalive(ir_node *end, int pos, ir_node *ka);
/* Some parts of the End node are allocated separately -- their memory
   is not recovered by dead_node_elimination if an End node is dead.
   free_End frees these data structures. */
inline type *get_Call_type (ir_node *node);
inline void set_Call_type (ir_node *node, type *type);
+/* Set, get and remove the callee-analysis. */
+int get_Call_n_callees(ir_node * node);
+entity * get_Call_callee(ir_node * node, int pos);
+void set_Call_callee_arr(ir_node * node, int n, entity ** arr);
+void remove_Call_callee_arr(ir_node * node);
+
/* For unary and binary arithmetic operations the access to the
operands can be factored out. Left is the first, right the
second arithmetic value as listed in tech report 1999-44.
inline ir_node *get_Id_pred (ir_node *node);
inline void set_Id_pred (ir_node *node, ir_node *pred);
+inline ir_node *get_Filter_pred(ir_node *node);
+inline long get_Filter_proj(ir_node *node);
+/* set the interprocedural predecessors */
+void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in);
+void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred);
+
+/* Returns the ir_graph this node belongs to. Only valid for
+ * CallBegin, EndReg and EndExcept */
+inline ir_graph *get_irn_irg(ir_node *node);
+
/*****/
/****s* irnode/other2
inline int is_Bad (ir_node *node);
/* returns true if the node is not a Block */
inline int is_no_Block (ir_node *node);
+/* returns true if node is a Proj node or a Filter node in
+ * intraprocedural view */
+inline int is_Proj (ir_node *node);
/* Returns true if the operation manipulates control flow:
Start, End, Jmp, Cond, Return, Raise, Bad */
int is_cfop(ir_node *node);
in different phases. Eventually inline the whole
datastructure. */
exc_t exc; /* role of this block for exception handling */
+ ir_node ** in_cg; /* array with predecessors in
+ * interprocedural_view, if they differ
+ * from intraprocedural predecessors */
} block_attr;
/* Cond attributes */
#if PRECISE_EXC_CONTEXT
struct ir_node **frag_arr; /* For Phi node construction in case of exceptions */
#endif
+ entity ** callee_arr; /* result of callee analysis */
} call_attr;
/* Alloc attributes */
#endif
} alloc_attr;
+/* Filter attributes.  A Filter acts as a Proj in the intraprocedural
+ * view and as a Phi in the interprocedural view. */
+typedef struct {
+ long proj; /* result position to project (Proj role) */
+ ir_node ** in_cg; /* interprocedural predecessors (Phi role) */
+} filter_attr;
+
+/* EndReg/EndExcept attributes */
+typedef struct {
+ ir_graph * irg; /* ir_graph this node belongs to (for
+ * navigating in interprocedural graphs) */
+} end_attr;
+
+/* CallBegin attributes */
+typedef struct {
+ ir_graph * irg; /* ir_graph this node belongs to (for
+ * navigating in interprocedural graphs) */
+ ir_node * call; /* associated Call-operation */
+} callbegin_attr;
+
/* Some irnodes just have one attribute, these are stored here,
some have more. Their name is 'irnodename_attr' */
typedef union {
node takes the role of the obsolete Phi0 node,
therefore the name. */
long proj; /* For Proj: contains the result position to project */
+ filter_attr filter; /* For Filter */
+ end_attr end; /* For EndReg, EndExcept */
+ callbegin_attr callbegin; /* For CallBegin */
#if PRECISE_EXC_CONTEXT
struct ir_node **frag_arr; /* For Phi node construction in case of exceptions
for nodes Store, Load, Div, Mod, Quot, DivMod. */
type *get_irn_call_attr (ir_node *node);
sel_attr get_irn_sel_attr (ir_node *node);
int get_irn_phi_attr (ir_node *node);
-block_attr get_irn_return_attr (ir_node *node);
+block_attr get_irn_block_attr (ir_node *node);
# endif /* _IRNODE_T_H_ */
ir_op *op_Id;
ir_op *op_Bad;
+ir_op *op_Unknown;
+ir_op *op_Filter;
+ir_op *op_Break;
+ir_op *op_CallBegin;
+ir_op *op_EndReg;
+ir_op *op_EndExcept;
+
ir_op *
new_ir_op (opcode code, char *name, op_pinned p, int labeled, size_t attr_size)
op_Tuple = new_ir_op (iro_Tuple, "Tuple", floats, 1, 0);
op_Id = new_ir_op (iro_Id, "Id", floats, 0, 0);
op_Bad = new_ir_op (iro_Bad, "Bad", floats, 0, 0);
+
+ op_Unknown = new_ir_op (iro_Unknown, "Unknown", floats, 0, 0);
+ op_Filter = new_ir_op (iro_Filter, "Filter", pinned, 0, sizeof(filter_attr));
+ op_Break = new_ir_op (iro_Break, "Break", pinned, 0, 0);
+ op_CallBegin = new_ir_op (iro_CallBegin, "CallBegin", pinned, 0, sizeof(callbegin_attr));
+ op_EndReg = new_ir_op (iro_EndReg, "EndReg", pinned, 0, sizeof(end_attr));
+ op_EndExcept = new_ir_op (iro_EndExcept, "EndExcept", pinned, 0, sizeof(end_attr));
}
/* Returns the string for the opcode. */
|| (op == op_Return)
|| (op == op_Raise)
|| (op == op_Bad)
- || (op == op_End));
+ || (op == op_End)
+ || (op == op_Unknown)
+ || (op == op_Break)
+ || (op == op_CallBegin)
+ || (op == op_EndReg)
+ || (op == op_EndExcept));
}
iro_Cmp, iro_Shl, iro_Shr, iro_Shrs, iro_Rot, iro_Conv,
iro_Phi,
iro_Load, iro_Store, iro_Alloc, iro_Free, iro_Sync,
- iro_Proj, iro_Tuple, iro_Id, iro_Bad
+ iro_Proj, iro_Tuple, iro_Id, iro_Bad,
+ iro_Unknown, iro_Filter, iro_Break, iro_CallBegin, iro_EndReg, iro_EndExcept
} opcode;
typedef struct ir_op ir_op;
extern ir_op *op_Id;
extern ir_op *op_Bad;
+extern ir_op *op_Unknown;
+extern ir_op *op_Filter;
+extern ir_op *op_Break;
+extern ir_op *op_CallBegin;
+extern ir_op *op_EndReg;
+extern ir_op *op_EndExcept;
+
/* Returns the string for the opcode. */
const char *get_op_name (ir_op *op);
return get_irn_const_attr (a) != get_irn_const_attr (b);
case iro_Proj:
return get_irn_proj_attr (a) != get_irn_proj_attr (b);
+ case iro_Filter:
+ return get_Filter_proj(a) != get_Filter_proj(b);
case iro_Alloc:
return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
|| (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
if (opcode != iro_Phi && opcode != iro_Block)
for (i = 0; i < get_irn_arity(n); i++)
- if (get_irn_opcode(get_irn_n(n, i)) == iro_Bad)
+ if (get_irn_opcode(get_irn_n(n, i)) == iro_Bad
+ || get_irn_opcode(get_irn_n(n, i)) == iro_Unknown)
return;
mymode = get_irn_mode (n);
/* Cond: BB x Iu --> X^n */
|| op1mode == mode_I) && "Cond node"
);
- assert (mymode == mode_T);
+ assert (mymode == mode_T);
break;
case iro_Return:
op1mode = get_irn_mode(in[1]);
mt = get_entity_type(get_irg_ent(current_ir_graph));
assert(get_Return_n_res(n) == get_method_n_res(mt) &&
"Number of results for Return doesn't match number of results in type.");
- for (i = 0; i < get_Return_n_res(n); i++)
- assert((get_irn_mode(get_Return_res(n, i))
- == get_type_mode(get_method_res_type(mt, i))) &&
- "Mode of result for Return doesn't match mode of result type.");
+ for (i = 0; i < get_Return_n_res(n); i++)
+ assert((get_irn_mode(get_Return_res(n, i))
+ == get_type_mode(get_method_res_type(mt, i))) &&
+ "Mode of result for Return doesn't match mode of result type.");
break;
case iro_Raise:
case iro_Tuple:
/* We don't test */
break;
+ case iro_CallBegin:
+ break;
+ case iro_EndReg:
+ break;
+ case iro_EndExcept:
+ break;
default: assert(0);
}
}
inline void
set_entity_irg(entity *ent, ir_graph *irg) {
assert (ent && ent->type);
- assert (irg);
+ /* NOTE: irg may be NULL -- there must be a way to clear the irg
+ * reference, e.g. when the method itself can no longer be called
+ * but the entity is to be kept. */
+ /* assert (irg); */
assert (is_method_type(ent->type));
assert (ent->peculiarity == existent);
ent->irg = irg;