#ifdef FIRM_STATISTICS
#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
+#ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+#endif
+#ifdef HAVE_STRING_H
+# include <string.h>
+#endif
+
+#include "irouts.h"
+#include "irdump.h"
+#include "hashptr.h"
#include "firmstat_t.h"
#include "pattern.h"
#include "dags.h"
+#include "stat_dmp.h"
+#include "xmalloc.h"
+#include "irhooks.h"
-/**
- * names of the optimizations
+/*
+ * need this to be static:
+ * Special pseudo Opcodes that we need to count some interesting cases
*/
-static const char *opt_names[] = {
- "straightening optimization",
- "if simplification",
- "algebraic simplification",
- "Phi optmization",
- "Write-After-Write optimization",
- "Write-After-Read optimization",
- "Read-After-Write optimization",
- "Read-After-Read optimization",
- "Tuple optimization",
- "ID optimization",
- "Constant evaluation",
- "Strength reduction",
- "Lowered",
-};
/**
- * need this to be static
+ * The Phi0, a node that is created during SSA construction
*/
-static ir_op _op_Phi0, _op_PhiM;
+static ir_op _op_Phi0;
+
+/** The PhiM, just to count memory Phi's. */
+static ir_op _op_PhiM;
+
+/** The Mul by Const node. */
+static ir_op _op_MulC;
+
+/** The Div by Const node. */
+static ir_op _op_DivC;
+
+/** The Mod by Const node. */
+static ir_op _op_ModC;
+
+/** The DivMod by Const node. */
+static ir_op _op_DivModC;
+
+/** The memory Proj node. */
+static ir_op _op_ProjM;
+
+/** A Sel of a Sel */
+static ir_op _op_SelSel;
+
+/** A Sel of a Sel of a Sel */
+static ir_op _op_SelSelSel;
/* ---------------------------------------------------------------------------------- */
+/** Marks the begin of a statistic (hook) function. */
#define STAT_ENTER ++status->recursive
+
+/** Marks the end of a statistic (hook) functions. */
#define STAT_LEAVE --status->recursive
+
+/** Allows to enter a statistic function only when we are not already in a hook. */
#define STAT_ENTER_SINGLE do { if (status->recursive > 0) return; ++status->recursive; } while (0)
/**
* global status
*/
-static stat_info_t _status, *status = &_status;
+static const int status_disable = 0;
+static stat_info_t *status = (stat_info_t *)&status_disable;
/**
* compare two elements of the opcode hash
}
/**
- * compare two elements of the block hash
+ * compare two elements of the block/extbb hash
*/
static int block_cmp(const void *elt, const void *key)
{
return e1->block_nr != e2->block_nr;
}
+/**
+ * compare two elements of the be_block hash
+ */
+static int be_block_cmp(const void *elt, const void *key)
+{
+ const be_block_entry_t *e1 = elt;
+ const be_block_entry_t *e2 = key;
+
+ return e1->block_nr != e2->block_nr;
+}
+
+/**
+ * compare two elements of reg pressure hash
+ */
+static int reg_pressure_cmp(const void *elt, const void *key)
+{
+ const reg_pressure_entry_t *e1 = elt;
+ const reg_pressure_entry_t *e2 = key;
+
+ return e1->class_name != e2->class_name;
+}
+
+/**
+ * compare two elements of the perm_stat hash
+ */
+static int perm_stat_cmp(const void *elt, const void *key)
+{
+ const perm_stat_entry_t *e1 = elt;
+ const perm_stat_entry_t *e2 = key;
+
+ return e1->perm != e2->perm;
+}
+
+/**
+ * compare two elements of the perm_class hash
+ */
+static int perm_class_cmp(const void *elt, const void *key)
+{
+ const perm_class_entry_t *e1 = elt;
+ const perm_class_entry_t *e2 = key;
+
+ return e1->class_name != e2->class_name;
+}
+
/**
* compare two elements of the ir_op hash
*/
return e1->code != e2->code;
}
+/**
+ * compare two elements of the address_mark set
+ */
+static int address_mark_cmp(const void *elt, const void *key, size_t size)
+{
+ const address_mark_entry_t *e1 = elt;
+ const address_mark_entry_t *e2 = key;
+
+ /* compare only the nodes, the rest is used as data container */
+ return e1->node != e2->node;
+}
+
/**
* clears all counter in a node_entry_t
*/
/**
* Returns the associates node_entry_t for an ir_op
+ *
+ * @param op the IR operation
+ * @param hmap a hash map containing ir_op* -> node_entry_t*
*/
-static node_entry_t *opcode_get_entry(const ir_op *op, pset *set)
+static node_entry_t *opcode_get_entry(const ir_op *op, hmap_node_entry_t *hmap)
{
node_entry_t key;
node_entry_t *elem;
key.op = op;
- elem = pset_find(set, &key, op->code);
+ elem = pset_find(hmap, &key, op->code);
if (elem)
return elem;
elem = obstack_alloc(&status->cnts, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
/* clear counter */
opcode_clear_entry(elem);
elem->op = op;
- return pset_insert(set, elem, op->code);
+ return pset_insert(hmap, elem, op->code);
}
/**
* Returns the associates ir_op for an opcode
+ *
+ * @param code the IR opcode
+ * @param hmap the hash map containing opcode -> ir_op*
*/
-static ir_op *opcode_find_entry(opcode code, pset *set)
+static ir_op *opcode_find_entry(opcode code, hmap_ir_op *hmap)
{
ir_op key;
key.code = code;
- return pset_find(set, &key, code);
-}
-
-/**
- * calculates a hash value for an irg
- * Addresses are typically aligned at 32bit, so we ignore the lowest bits
- */
-static INLINE unsigned irg_hash(const ir_graph *irg)
-{
- return (unsigned)irg >> 3;
+ return pset_find(hmap, &key, code);
}
/**
* clears all counter in a graph_entry_t
*/
-static void graph_clear_entry(graph_entry_t *elem)
+static void graph_clear_entry(graph_entry_t *elem, int all)
{
- cnt_clr(&elem->cnt_walked);
- cnt_clr(&elem->cnt_walked_blocks);
- cnt_clr(&elem->cnt_was_inlined);
- cnt_clr(&elem->cnt_got_inlined);
- cnt_clr(&elem->cnt_strength_red);
+ if (all) {
+ cnt_clr(&elem->cnt_walked);
+ cnt_clr(&elem->cnt_walked_blocks);
+ cnt_clr(&elem->cnt_was_inlined);
+ cnt_clr(&elem->cnt_got_inlined);
+ cnt_clr(&elem->cnt_strength_red);
+ cnt_clr(&elem->cnt_real_func_call);
+ }
cnt_clr(&elem->cnt_edges);
+ cnt_clr(&elem->cnt_all_calls);
+ cnt_clr(&elem->cnt_call_with_cnst_arg);
+ cnt_clr(&elem->cnt_indirect_calls);
+
+ if (elem->block_hash) {
+ del_pset(elem->block_hash);
+ elem->block_hash = NULL;
+ }
+
+ if (elem->extbb_hash) {
+ del_pset(elem->extbb_hash);
+ elem->extbb_hash = NULL;
+ }
+
+ obstack_free(&elem->recalc_cnts, NULL);
+ obstack_init(&elem->recalc_cnts);
}
/**
- * Returns the acssociates graph_entry_t for an irg
+ * Returns the associated graph_entry_t for an IR graph.
+ *
+ * @param irg the IR graph
+ * @param hmap the hash map containing ir_graph* -> graph_entry_t*
*/
-static graph_entry_t *graph_get_entry(ir_graph *irg, pset *set)
+static graph_entry_t *graph_get_entry(ir_graph *irg, hmap_graph_entry_t *hmap)
{
graph_entry_t key;
graph_entry_t *elem;
key.irg = irg;
- elem = pset_find(set, &key, irg_hash(irg));
- if (elem)
+ elem = pset_find(hmap, &key, HASH_PTR(irg));
+
+ if (elem) {
+ /* create hash map backend block information */
+ if (! elem->be_block_hash)
+ elem->be_block_hash = new_pset(be_block_cmp, 5);
+
return elem;
+ }
+ /* allocate a new one */
elem = obstack_alloc(&status->cnts, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
+ obstack_init(&elem->recalc_cnts);
/* clear counter */
- graph_clear_entry(elem);
+ graph_clear_entry(elem, 1);
/* new hash table for opcodes here */
elem->opcode_hash = new_pset(opcode_cmp, 5);
- elem->block_hash = new_pset(block_cmp, 5);
+ elem->address_mark = new_set(address_mark_cmp, 5);
elem->irg = irg;
+ /* these hash tables are created on demand */
+ elem->block_hash = NULL;
+ elem->extbb_hash = NULL;
+
for (i = 0; i < sizeof(elem->opt_hash)/sizeof(elem->opt_hash[0]); ++i)
elem->opt_hash[i] = new_pset(opt_cmp, 4);
- return pset_insert(set, elem, irg_hash(irg));
+ return pset_insert(hmap, elem, HASH_PTR(irg));
}
/**
}
/**
- * Returns the associates opt_entry_t for an ir_op
+ * Returns the associated opt_entry_t for an IR operation.
+ *
+ * @param op the IR operation
+ * @param hmap the hash map containing ir_op* -> opt_entry_t*
*/
-static opt_entry_t *opt_get_entry(const ir_op *op, pset *set)
+static opt_entry_t *opt_get_entry(const ir_op *op, hmap_opt_entry_t *hmap)
{
opt_entry_t key;
opt_entry_t *elem;
key.op = op;
- elem = pset_find(set, &key, op->code);
+ elem = pset_find(hmap, &key, op->code);
if (elem)
return elem;
elem = obstack_alloc(&status->cnts, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
/* clear new counter */
opt_clear_entry(elem);
elem->op = op;
- return pset_insert(set, elem, op->code);
+ return pset_insert(hmap, elem, op->code);
}
/**
cnt_clr(&elem->cnt_edges);
cnt_clr(&elem->cnt_in_edges);
cnt_clr(&elem->cnt_out_edges);
+ cnt_clr(&elem->cnt_phi_data);
}
/**
- * Returns the associates block_entry_t for an block
+ * Returns the associated block_entry_t for a block.
+ *
+ * @param block_nr an IR block number
+ * @param hmap a hash map containing long -> block_entry_t
*/
-static block_entry_t *block_get_entry(long block_nr, pset *set)
+static block_entry_t *block_get_entry(struct obstack *obst, long block_nr, hmap_block_entry_t *hmap)
{
block_entry_t key;
block_entry_t *elem;
key.block_nr = block_nr;
- elem = pset_find(set, &key, block_nr);
+ elem = pset_find(hmap, &key, block_nr);
if (elem)
return elem;
- elem = obstack_alloc(&status->cnts, sizeof(*elem));
+ elem = obstack_alloc(obst, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
/* clear new counter */
block_clear_entry(elem);
elem->block_nr = block_nr;
- return pset_insert(set, elem, block_nr);
+ return pset_insert(hmap, elem, block_nr);
+}
+
+/**
+ * clears all sets in be_block_entry_t
+ */
+static void be_block_clear_entry(be_block_entry_t *elem)
+{
+ if (elem->reg_pressure)
+ del_pset(elem->reg_pressure);
+
+ if (elem->sched_ready)
+ stat_delete_distrib_tbl(elem->sched_ready);
+
+ if (elem->perm_class_stat)
+ del_pset(elem->perm_class_stat);
+
+ elem->reg_pressure = new_pset(reg_pressure_cmp, 5);
+ elem->sched_ready = stat_new_int_distrib_tbl();
+ elem->perm_class_stat = new_pset(perm_class_cmp, 5);
+}
+
+/**
+ * Returns the associated be_block_entry_t for a block.
+ *
+ * @param block_nr an IR block number
+ * @param hmap a hash map containing long -> be_block_entry_t
+ */
+static be_block_entry_t *be_block_get_entry(struct obstack *obst, long block_nr, hmap_be_block_entry_t *hmap)
+{
+ be_block_entry_t key;
+ be_block_entry_t *elem;
+
+ key.block_nr = block_nr;
+
+ elem = pset_find(hmap, &key, block_nr);
+ if (elem)
+ return elem;
+
+ elem = obstack_alloc(obst, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
+
+ /* clear new counter */
+ be_block_clear_entry(elem);
+
+ elem->block_nr = block_nr;
+
+ return pset_insert(hmap, elem, block_nr);
+}
+
+/**
+ * clears all sets in perm_class_entry_t
+ */
+static void perm_class_clear_entry(perm_class_entry_t *elem) {
+ if (elem->perm_stat)
+ del_pset(elem->perm_stat);
+
+ elem->perm_stat = new_pset(perm_stat_cmp, 5);
+}
+
+/**
+ * Returns the associated perm_class entry for a register class.
+ *
+ * @param class_name the register class name
+ * @param hmap a hash map containing class_name -> perm_class_entry_t
+ */
+static perm_class_entry_t *perm_class_get_entry(struct obstack *obst, const char *class_name,
+ hmap_perm_class_entry_t *hmap)
+{
+ perm_class_entry_t key;
+ perm_class_entry_t *elem;
+
+ key.class_name = class_name;
+
+ elem = pset_find(hmap, &key, HASH_PTR(class_name));
+ if (elem)
+ return elem;
+
+ elem = obstack_alloc(obst, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
+
+ /* clear new counter */
+ perm_class_clear_entry(elem);
+
+ elem->class_name = class_name;
+
+ return pset_insert(hmap, elem, HASH_PTR(class_name));
+}
+
+/**
+ * clears all sets in perm_stat_entry_t
+ */
+static void perm_stat_clear_entry(perm_stat_entry_t *elem) {
+ if (elem->chains)
+ stat_delete_distrib_tbl(elem->chains);
+
+ if (elem->cycles)
+ stat_delete_distrib_tbl(elem->cycles);
+
+ elem->chains = stat_new_int_distrib_tbl();
+ elem->cycles = stat_new_int_distrib_tbl();
}
+/**
+ * Returns the associated perm_stat entry for a perm.
+ *
+ * @param perm the perm node
+ * @param hmap a hash map containing perm -> perm_stat_entry_t
+ */
+static perm_stat_entry_t *perm_stat_get_entry(struct obstack *obst, ir_node *perm, hmap_perm_stat_entry_t *hmap)
+{
+ perm_stat_entry_t key;
+ perm_stat_entry_t *elem;
+
+ key.perm = perm;
+
+ elem = pset_find(hmap, &key, HASH_PTR(perm));
+ if (elem)
+ return elem;
+
+ elem = obstack_alloc(obst, sizeof(*elem));
+ memset(elem, 0, sizeof(*elem));
+
+ /* clear new counter */
+ perm_stat_clear_entry(elem);
+
+ elem->perm = perm;
+
+ return pset_insert(hmap, elem, HASH_PTR(perm));
+}
/**
* Returns the ir_op for an IR-node,
- * handles special cases and return pseudo op codes
+ * handles special cases and return pseudo op codes.
+ *
+ * @param node an IR node
*/
-static ir_op *stat_get_irn_op(const ir_node *node)
+static ir_op *stat_get_irn_op(ir_node *node)
{
ir_op *op = get_irn_op(node);
- if (op->code == iro_Phi && get_irn_arity(node) == 0) {
+ if (op == op_Phi && get_irn_arity(node) == 0) {
/* special case, a Phi0 node, count on extra counter */
- op = status->op_Phi0;
+ op = status->op_Phi0 ? status->op_Phi0 : op;
}
- else if (op->code == iro_Phi && get_irn_mode(node) == mode_M) {
+ else if (op == op_Phi && get_irn_mode(node) == mode_M) {
/* special case, a Memory Phi node, count on extra counter */
- op = status->op_PhiM;
+ op = status->op_PhiM ? status->op_PhiM : op;
+ }
+ else if (op == op_Proj && get_irn_mode(node) == mode_M) {
+ /* special case, a Memory Proj node, count on extra counter */
+ op = status->op_ProjM ? status->op_ProjM : op;
+ }
+ else if (op == op_Mul &&
+ (get_irn_op(get_Mul_left(node)) == op_Const || get_irn_op(get_Mul_right(node)) == op_Const)) {
+ /* special case, a Multiply by a const, count on extra counter */
+ op = status->op_MulC ? status->op_MulC : op;
}
+ else if (op == op_Div && get_irn_op(get_Div_right(node)) == op_Const) {
+ /* special case, a division by a const, count on extra counter */
+ op = status->op_DivC ? status->op_DivC : op;
+ }
+ else if (op == op_Mod && get_irn_op(get_Mod_right(node)) == op_Const) {
+		/* special case, a modulo by a const, count on extra counter */
+ op = status->op_ModC ? status->op_ModC : op;
+ }
+ else if (op == op_DivMod && get_irn_op(get_DivMod_right(node)) == op_Const) {
+ /* special case, a division/modulo by a const, count on extra counter */
+ op = status->op_DivModC ? status->op_DivModC : op;
+ }
+ else if (op == op_Sel && get_irn_op(get_Sel_ptr(node)) == op_Sel) {
+ /* special case, a Sel of a Sel, count on extra counter */
+ op = status->op_SelSel ? status->op_SelSel : op;
+
+ if (get_irn_op(get_Sel_ptr(get_Sel_ptr(node))) == op_Sel) {
+ /* special case, a Sel of a Sel of a Sel, count on extra counter */
+ op = status->op_SelSelSel ? status->op_SelSelSel : op;
+ }
+ }
+
return op;
}
/**
* update the block counter
*/
-static void count_block_info(ir_node *node, graph_entry_t *graph)
+static void undate_block_info(ir_node *node, graph_entry_t *graph)
{
ir_op *op = get_irn_op(node);
ir_node *block;
/* check for block */
if (op == op_Block) {
arity = get_irn_arity(node);
- b_entry = block_get_entry(get_irn_node_nr(node), graph->block_hash);
+ b_entry = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(node), graph->block_hash);
/* count all incoming edges */
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
ir_node *other_block = get_nodes_block(pred);
- block_entry_t *b_entry_other = block_get_entry(get_irn_node_nr(other_block), graph->block_hash);
+ block_entry_t *b_entry_other = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(other_block), graph->block_hash);
cnt_inc(&b_entry->cnt_in_edges); /* an edge coming from another block */
cnt_inc(&b_entry_other->cnt_out_edges);
}
return;
}
- else if (op == op_Call) {
- // return;
- }
block = get_nodes_block(node);
- b_entry = block_get_entry(get_irn_node_nr(block), graph->block_hash);
+ b_entry = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(block), graph->block_hash);
+
+ if (op == op_Phi && mode_is_datab(get_irn_mode(node))) {
+ /* count data Phi per block */
+ cnt_inc(&b_entry->cnt_phi_data);
+ }
- /* we have a new nodes */
+ /* we have a new node in our block */
cnt_inc(&b_entry->cnt_nodes);
+ /* don't count keep-alive edges */
+ if (get_irn_op(node) == op_End)
+ return;
+
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
ir_node *other_block;
- if (get_irn_op(pred) == op_Block)
- continue;
-
other_block = get_nodes_block(pred);
if (other_block == block)
cnt_inc(&b_entry->cnt_edges); /* a in block edge */
else {
- block_entry_t *b_entry_other = block_get_entry(get_irn_node_nr(other_block), graph->block_hash);
+ block_entry_t *b_entry_other = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(other_block), graph->block_hash);
cnt_inc(&b_entry->cnt_in_edges); /* an edge coming from another block */
cnt_inc(&b_entry_other->cnt_out_edges);
}
/**
- * walker for reachable nodes count
+ * update the extended block counter
*/
-static void count_nodes(ir_node *node, void *env)
+static void undate_extbb_info(ir_node *node, graph_entry_t *graph)
{
- graph_entry_t *graph = env;
- node_entry_t *entry;
+ ir_op *op = get_irn_op(node);
+ ir_extblk *extbb;
+ extbb_entry_t *eb_entry;
+ int i, arity;
- ir_op *op = stat_get_irn_op(node);
- int arity = get_irn_arity(node);
+ /* check for block */
+ if (op == op_Block) {
+ extbb = get_nodes_extbb(node);
+ arity = get_irn_arity(node);
+ eb_entry = block_get_entry(&graph->recalc_cnts, get_extbb_node_nr(extbb), graph->extbb_hash);
- entry = opcode_get_entry(op, graph->opcode_hash);
+ /* count all incoming edges */
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(node, i);
+ ir_extblk *other_extbb = get_nodes_extbb(pred);
- cnt_inc(&entry->cnt_alive);
- cnt_add_i(&graph->cnt_edges, arity);
+ if (extbb != other_extbb) {
+ extbb_entry_t *eb_entry_other = block_get_entry(&graph->recalc_cnts, get_extbb_node_nr(other_extbb), graph->extbb_hash);
- /* count block edges */
- count_block_info(node, graph);
-}
+ cnt_inc(&eb_entry->cnt_in_edges); /* an edge coming from another extbb */
+ cnt_inc(&eb_entry_other->cnt_out_edges);
+ }
+ }
+ return;
+ }
-/**
- * count all alive nodes and edges in a graph
- */
-static void count_nodes_in_graph(graph_entry_t *global, graph_entry_t *graph)
-{
- node_entry_t *entry;
+ extbb = get_nodes_extbb(node);
+ eb_entry = block_get_entry(&graph->recalc_cnts, get_extbb_node_nr(extbb), graph->extbb_hash);
- /* clear first the alive counter in the graph */
- for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) {
- cnt_clr(&entry->cnt_alive);
+ if (op == op_Phi && mode_is_datab(get_irn_mode(node))) {
+ /* count data Phi per extbb */
+ cnt_inc(&eb_entry->cnt_phi_data);
}
- /* count the nodes in the graph */
- irg_walk_graph(graph->irg, count_nodes, NULL, graph);
-
- /* assume we walk every graph only ONCE, we could sum here the global count */
- for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) {
- node_entry_t *g_entry = opcode_get_entry(entry->op, global->opcode_hash);
+ /* we have a new node in our block */
+ cnt_inc(&eb_entry->cnt_nodes);
- /* update the node counter */
- cnt_add(&g_entry->cnt_alive, &entry->cnt_alive);
- }
+ /* don't count keep-alive edges */
+ if (get_irn_op(node) == op_End)
+ return;
- /* update the edge counter */
- cnt_add(&global->cnt_edges, &graph->cnt_edges);
-}
+ arity = get_irn_arity(node);
-/**
- * register a dumper
- */
-static void stat_register_dumper(dumper_t *dumper)
-{
- dumper->next = status->dumper;
- status->dumper = dumper;
-}
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(node, i);
+ ir_extblk *other_extbb = get_nodes_extbb(pred);
-/**
- * dumps an irg
- */
-static void dump_graph(graph_entry_t *entry)
-{
- dumper_t *dumper;
+ if (other_extbb == extbb)
+ cnt_inc(&eb_entry->cnt_edges); /* a in extbb edge */
+ else {
+ extbb_entry_t *eb_entry_other = block_get_entry(&graph->recalc_cnts, get_extbb_node_nr(other_extbb), graph->extbb_hash);
- for (dumper = status->dumper; dumper; dumper = dumper->next) {
- if (dumper->dump_graph)
- dumper->dump_graph(dumper, entry);
+ cnt_inc(&eb_entry->cnt_in_edges); /* an edge coming from another extbb */
+ cnt_inc(&eb_entry_other->cnt_out_edges);
+ }
}
}
-/**
- * initialise the dumper
- */
-static void dump_init(const char *name)
+/** calculates how many arguments of the call are const */
+static int cnt_const_args(ir_node *call)
{
- dumper_t *dumper;
+ int i, res = 0;
+ int n = get_Call_n_params(call);
- for (dumper = status->dumper; dumper; dumper = dumper->next) {
- if (dumper->init)
- dumper->init(dumper, name);
+ for (i = 0; i < n; ++i) {
+ ir_node *param = get_Call_param(call, i);
+ ir_op *op = get_irn_op(param);
+
+ if (op == op_Const || op == op_SymConst)
+ ++res;
}
+ return res;
}
/**
- * finish the dumper
+ * update info on calls
+ *
+ * @param call The call
+ * @param graph The graph entry containing the call
*/
-static void dump_finish(void)
+static void stat_update_call(ir_node *call, graph_entry_t *graph)
{
- dumper_t *dumper;
+ ir_node *block = get_nodes_block(call);
+ ir_node *ptr = get_Call_ptr(call);
+ entity *ent = NULL;
+ ir_graph *callee = NULL;
+ int num_const_args;
+
+ /*
+ * If the block is bad, the whole subgraph will collapse later
+ * so do not count this call.
+ * This happens in dead code.
+ */
+ if (is_Bad(block))
+ return;
- for (dumper = status->dumper; dumper; dumper = dumper->next) {
- if (dumper->finish)
- dumper->finish(dumper);
+ cnt_inc(&graph->cnt_all_calls);
+
+ /* found a call, this function is not a leaf */
+ graph->is_leaf = 0;
+
+ if (get_irn_op(ptr) == op_SymConst) {
+ if (get_SymConst_kind(ptr) == symconst_addr_ent) {
+			/* ok, we seem to know the entity */
+ ent = get_SymConst_entity(ptr);
+ callee = get_entity_irg(ent);
+
+ /* it is recursive, if it calls at least once */
+ if (callee == graph->irg)
+ graph->is_recursive = 1;
+ }
}
-}
+ else {
+		/* indirect call, we could not predict the callee */
+ cnt_inc(&graph->cnt_indirect_calls);
-/* ---------------------------------------------------------------------- */
+ /* NOT a leaf call */
+ graph->is_leaf_call = LCS_NON_LEAF_CALL;
+ }
-/**
- * dumps a opcode hash into human readable form
- */
-static void simple_dump_opcode_hash(dumper_t *dmp, pset *set)
-{
- node_entry_t *entry;
- counter_t f_alive;
- counter_t f_new_node;
- counter_t f_Id;
+ /* check, if it's a chain-call: Then, the call-block
+ * must dominate the end block. */
+ {
+ ir_node *curr = get_irg_end_block(graph->irg);
+ int depth = get_Block_dom_depth(block);
- cnt_clr(&f_alive);
- cnt_clr(&f_new_node);
- cnt_clr(&f_Id);
+ for (; curr != block && get_Block_dom_depth(curr) > depth;) {
+ curr = get_Block_idom(curr);
- fprintf(dmp->f, "%-16s %-8s %-8s %-8s\n", "Opcode", "alive", "created", "->Id");
- for (entry = pset_first(set); entry; entry = pset_next(set)) {
- fprintf(dmp->f, "%-16s %8d %8d %8d\n",
- get_id_str(entry->op->name), entry->cnt_alive.cnt[0], entry->new_node.cnt[0], entry->into_Id.cnt[0]);
+ if (! curr || is_no_Block(curr))
+ break;
+ }
- cnt_add(&f_alive, &entry->cnt_alive);
- cnt_add(&f_new_node, &entry->new_node);
- cnt_add(&f_Id, &entry->into_Id);
+ if (curr != block)
+ graph->is_chain_call = 0;
}
- fprintf(dmp->f, "-------------------------------------------\n");
- fprintf(dmp->f, "%-16s %8d %8d %8d\n", "Sum",
- f_alive.cnt[0],
- f_new_node.cnt[0],
- f_Id.cnt[0]);
-}
-
-/**
- * dumps a optimization hash into human readable form
- */
-static void simple_dump_opt_hash(dumper_t *dmp, pset *set, int index)
-{
- opt_entry_t *entry = pset_first(set);
- if (entry) {
- fprintf(dmp->f, "\n%s:\n", opt_names[index]);
- fprintf(dmp->f, "%-16s %-8s\n", "Opcode", "deref");
+ /* check, if the callee is a leaf */
+ if (callee) {
+ graph_entry_t *called = graph_get_entry(callee, status->irg_hash);
- for (; entry; entry = pset_next(set)) {
- fprintf(dmp->f, "%-16s %8d\n",
- get_id_str(entry->op->name), entry->count.cnt[0]);
+ if (called->is_analyzed) {
+ if (! called->is_leaf)
+ graph->is_leaf_call = LCS_NON_LEAF_CALL;
}
}
-}
-/**
- * dumps the endges count
- */
-static void simple_dump_edges(dumper_t *dmp, counter_t *cnt)
-{
- fprintf(dmp->f, "%-16s %8d\n", "Edges", cnt->cnt[0]);
+ /* check, if arguments of the call are const */
+ num_const_args = cnt_const_args(call);
+
+ if (num_const_args > 0)
+ cnt_inc(&graph->cnt_call_with_cnst_arg);
}
/**
- * dumps the IRG
+ * update info on calls for graphs on the wait queue
*/
-static void simple_dump_graph(dumper_t *dmp, graph_entry_t *entry)
+static void stat_update_call_2(ir_node *call, graph_entry_t *graph)
{
- int dump_opts = 1;
- block_entry_t *b_entry;
-
- if (entry->irg) {
- ir_graph *const_irg = get_const_code_irg();
+ ir_node *block = get_nodes_block(call);
+ ir_node *ptr = get_Call_ptr(call);
+ entity *ent = NULL;
+ ir_graph *callee = NULL;
+
+ /*
+ * If the block is bad, the whole subgraph will collapse later
+ * so do not count this call.
+ * This happens in dead code.
+ */
+ if (is_Bad(block))
+ return;
- if (entry->irg == const_irg) {
- fprintf(dmp->f, "\nConst code Irg %p", (void *)entry->irg);
- }
- else {
- if (entry->ent)
- fprintf(dmp->f, "\nEntity %s, Irg %p", get_entity_name(entry->ent), (void *)entry->irg);
- else
- fprintf(dmp->f, "\nIrg %p", (void *)entry->irg);
+ if (get_irn_op(ptr) == op_SymConst) {
+ if (get_SymConst_kind(ptr) == symconst_addr_ent) {
+			/* ok, we seem to know the entity */
+ ent = get_SymConst_entity(ptr);
+ callee = get_entity_irg(ent);
}
-
- fprintf(dmp->f, " %swalked %d over blocks %d was inlined %d got inlined %d strength red %d:\n",
- entry->deleted ? "DELETED " : "",
- entry->cnt_walked.cnt[0], entry->cnt_walked_blocks.cnt[0],
- entry->cnt_was_inlined.cnt[0],
- entry->cnt_got_inlined.cnt[0],
- entry->cnt_strength_red.cnt[0]
- );
- }
- else {
- fprintf(dmp->f, "\nGlobals counts:\n");
- fprintf(dmp->f, "--------------\n");
- dump_opts = 0;
}
- simple_dump_opcode_hash(dmp, entry->opcode_hash);
- simple_dump_edges(dmp, &entry->cnt_edges);
+ /* check, if the callee is a leaf */
+ if (callee) {
+ graph_entry_t *called = graph_get_entry(callee, status->irg_hash);
- /* effects of optimizations */
- if (dump_opts) {
- int i;
-
- for (i = 0; i < sizeof(entry->opt_hash)/sizeof(entry->opt_hash[0]); ++i) {
- simple_dump_opt_hash(dmp, entry->opt_hash[i], i);
- }
+ assert(called->is_analyzed);
- /* dump block info */
- fprintf(dmp->f, "\n%12s %12s %12s %12s %12s %12s\n", "Block Nr", "Nodes", "intern E", "incoming E", "outgoing E", "quot");
- for (b_entry = pset_first(entry->block_hash);
- b_entry;
- b_entry = pset_next(entry->block_hash)) {
- fprintf(dmp->f, "BLK %12ld %12u %12u %12u %12u %4.8f\n",
- b_entry->block_nr,
- b_entry->cnt_nodes.cnt[0],
- b_entry->cnt_edges.cnt[0],
- b_entry->cnt_in_edges.cnt[0],
- b_entry->cnt_out_edges.cnt[0],
- (double)b_entry->cnt_edges.cnt[0] / (double)b_entry->cnt_nodes.cnt[0]
- );
- }
+ if (! called->is_leaf)
+ graph->is_leaf_call = LCS_NON_LEAF_CALL;
}
+ else
+ graph->is_leaf_call = LCS_NON_LEAF_CALL;
}
/**
- * initialise the simple dumper
+ * walker for reachable nodes count
*/
-static void simple_init(dumper_t *dmp, const char *name)
+static void update_node_stat(ir_node *node, void *env)
{
- char fname[2048];
+ graph_entry_t *graph = env;
+ node_entry_t *entry;
+
+ ir_op *op = stat_get_irn_op(node);
+ int arity = get_irn_arity(node);
+
+ entry = opcode_get_entry(op, graph->opcode_hash);
+
+ cnt_inc(&entry->cnt_alive);
+ cnt_add_i(&graph->cnt_edges, arity);
- snprintf(fname, sizeof(fname), "%s.txt", name);
- dmp->f = fopen(fname, "w");
+ /* count block edges */
+ undate_block_info(node, graph);
+
+ /* count extended block edges */
+ if (status->stat_options & FIRMSTAT_COUNT_EXTBB) {
+ undate_extbb_info(node, graph);
+ }
+
+ /* handle statistics for special node types */
+
+ if (op == op_Const) {
+ if (status->stat_options & FIRMSTAT_COUNT_CONSTS) {
+ /* check properties of constants */
+ stat_update_const(status, node, graph);
+ }
+ }
+ else if (op == op_Call) {
+ /* check for properties that depends on calls like recursion/leaf/indirect call */
+ stat_update_call(node, graph);
+ }
}
/**
- * finishes the simple dumper
+ * walker for reachable nodes count for graphs on the wait_q
*/
-static void simple_finish(dumper_t *dmp)
+static void update_node_stat_2(ir_node *node, void *env)
{
- fclose(dmp->f);
- dmp->f = NULL;
+ graph_entry_t *graph = env;
+
+ /* check for properties that depends on calls like recursion/leaf/indirect call */
+ if (is_Call(node))
+ stat_update_call_2(node, graph);
}
/**
- * the simple human readable dumper
+ * get the current address mark
*/
-static dumper_t simple_dumper = {
- simple_dump_graph,
- simple_init,
- simple_finish,
- NULL,
- NULL,
-};
+static unsigned get_adr_mark(graph_entry_t *graph, ir_node *node)
+{
+ address_mark_entry_t *value = set_find(graph->address_mark, &node, sizeof(*value), HASH_PTR(node));
-/* ---------------------------------------------------------------------- */
+ return value ? value->mark : 0;
+}
/**
- * count the nodes as needed:
- *
- * 1 normal (data) Phi's
- * 2 memory Phi's
- * 3 Proj
- * 0 all other nodes
+ * set the current address mark
*/
-static void csv_count_nodes(graph_entry_t *graph, counter_t cnt[])
+static void set_adr_mark(graph_entry_t *graph, ir_node *node, unsigned val)
{
- node_entry_t *entry;
- int i;
+ address_mark_entry_t *value = set_insert(graph->address_mark, &node, sizeof(*value), HASH_PTR(node));
- for (i = 0; i < 4; ++i)
- cnt_clr(&cnt[i]);
+ value->mark = val;
+}
- for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) {
- if (entry->op == op_Phi) {
- /* normal Phi */
- cnt_add(&cnt[1], &entry->cnt_alive);
- }
- else if (entry->op == status->op_PhiM) {
- /* memory Phi */
- cnt_add(&cnt[2], &entry->cnt_alive);
- }
- else if (entry->op == op_Proj) {
- /* Proj */
- cnt_add(&cnt[3], &entry->cnt_alive);
- }
- else {
- /* all other nodes */
- cnt_add(&cnt[0], &entry->cnt_alive);
- }
- }
+/**
+ * a vcg attribute hook: Color a node with a different color if
+ * it's identified as a part of an address expression or at least referenced
+ * by an address expression.
+ */
+static int stat_adr_mark_hook(FILE *F, ir_node *node, ir_node *local)
+{
+ ir_node *n = local ? local : node;
+ ir_graph *irg = get_irn_irg(n);
+ graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
+ unsigned mark = get_adr_mark(graph, n);
+
+ if (mark & MARK_ADDRESS_CALC)
+ fprintf(F, "color: purple");
+ else if ((mark & (MARK_REF_ADR | MARK_REF_NON_ADR)) == MARK_REF_ADR)
+ fprintf(F, "color: pink");
+ else if ((mark & (MARK_REF_ADR | MARK_REF_NON_ADR)) == (MARK_REF_ADR|MARK_REF_NON_ADR))
+ fprintf(F, "color: lightblue");
+ else
+ return 0;
+
+ /* I know the color! */
+ return 1;
}
/**
- * dumps the IRG
+ * walker that marks every node that is an address calculation
+ *
+ * predecessor nodes must be visited first. We ensure this by
+ * calling it in the post of an outs walk. This should work even in cycles,
+ * while the pre in a normal walk will not.
*/
-static void csv_dump_graph(dumper_t *dmp, graph_entry_t *entry)
+static void mark_address_calc(ir_node *node, void *env)
{
- const char *name;
+ graph_entry_t *graph = env;
+ ir_mode *mode = get_irn_mode(node);
+ int i, n;
+ unsigned mark_preds = MARK_REF_NON_ADR;
- counter_t cnt[4];
+ if (! mode_is_numP(mode))
+ return;
- if (entry->irg && !entry->deleted) {
- ir_graph *const_irg = get_const_code_irg();
+ if (mode_is_reference(mode)) {
+ /* a reference is calculated here, we are sure */
+ set_adr_mark(graph, node, MARK_ADDRESS_CALC);
- if (entry->irg == const_irg) {
- name = "<Const code Irg>";
- return;
- }
- else {
- if (entry->ent)
- name = get_entity_name(entry->ent);
- else
- name = "<UNKNOWN IRG>";
+ mark_preds = MARK_REF_ADR;
+ }
+ else {
+ unsigned mark = get_adr_mark(graph, node);
+
+ if ((mark & (MARK_REF_ADR | MARK_REF_NON_ADR)) == MARK_REF_ADR) {
+ /*
+			 * this node does not have a reference mode, but is only
+ * referenced by address calculations
+ */
+ mark_preds = MARK_REF_ADR;
}
+ }
- csv_count_nodes(entry, cnt);
+ /* mark all predecessors */
+ for (i = 0, n = get_irn_arity(node); i < n; ++i) {
+ ir_node *pred = get_irn_n(node, i);
- fprintf(dmp->f, "%-40s, %p, %d, %d, %d, %d\n",
- name,
- (void *)entry->irg,
- cnt[0].cnt[0],
- cnt[1].cnt[0],
- cnt[2].cnt[0],
- cnt[3].cnt[0]
- );
+ set_adr_mark(graph, pred, get_adr_mark(graph, pred) | mark_preds);
}
}
/**
- * initialise the simple dumper
+ * Called for every graph when the graph is either deleted or stat_dump_snapshot()
+ * is called, must recalculate all statistic info.
+ *
+ * @param global The global entry
+ * @param graph The current entry
*/
-static void csv_init(dumper_t *dmp, const char *name)
+static void update_graph_stat(graph_entry_t *global, graph_entry_t *graph)
{
- char fname[2048];
+ node_entry_t *entry;
+
+ /* clear first the alive counter in the graph */
+ for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) {
+ cnt_clr(&entry->cnt_alive);
+ }
+
+ /* set pessimistic values */
+ graph->is_leaf = 1;
+ graph->is_leaf_call = LCS_UNKNOWN;
+ graph->is_recursive = 0;
+ graph->is_chain_call = 1;
+
+ /* create new block counter */
+ graph->block_hash = new_pset(block_cmp, 5);
+
+ /* we need dominator info */
+ if (graph->irg != get_const_code_irg())
+ if (get_irg_dom_state(graph->irg) != dom_consistent)
+ compute_doms(graph->irg);
+
+ if (status->stat_options & FIRMSTAT_COUNT_EXTBB) {
+ /* we need extended basic blocks */
+ compute_extbb(graph->irg);
+
+ /* create new extbb counter */
+ graph->extbb_hash = new_pset(block_cmp, 5);
+ }
+
+ /* count the nodes in the graph */
+ irg_walk_graph(graph->irg, update_node_stat, NULL, graph);
+
+#if 0
+ /* Uncomment this code if chain-call means call exact one */
+ entry = opcode_get_entry(op_Call, graph->opcode_hash);
+
+ /* check if we have more than 1 call */
+ if (cnt_gt(entry->cnt_alive, 1))
+ graph->is_chain_call = 0;
+#endif
+
+ /* recursive functions are never chain calls, leafs don't have calls */
+ if (graph->is_recursive || graph->is_leaf)
+ graph->is_chain_call = 0;
+
+ /* assume we walk every graph only ONCE, we could sum here the global count */
+ for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) {
+ node_entry_t *g_entry = opcode_get_entry(entry->op, global->opcode_hash);
- snprintf(fname, sizeof(fname), "%s.csv", name);
- dmp->f = fopen(fname, "a");
+ /* update the node counter */
+ cnt_add(&g_entry->cnt_alive, &entry->cnt_alive);
+ }
+
+ /* update the edge counter */
+ cnt_add(&global->cnt_edges, &graph->cnt_edges);
+
+ /* count the number of address calculation */
+ if (graph->irg != get_const_code_irg()) {
+ ir_graph *rem = current_ir_graph;
+
+ if (get_irg_outs_state(graph->irg) != outs_consistent)
+ compute_irg_outs(graph->irg);
+
+		/* Must be done on the outs graph */
+ current_ir_graph = graph->irg;
+ irg_out_walk(get_irg_start(graph->irg), NULL, mark_address_calc, graph);
+ current_ir_graph = rem;
+
+#if 0
+ set_dump_node_vcgattr_hook(stat_adr_mark_hook);
+ dump_ir_block_graph(graph->irg, "-adr");
+ set_dump_node_vcgattr_hook(NULL);
+#endif
+ }
+
+ /* count the DAG's */
+ if (status->stat_options & FIRMSTAT_COUNT_DAG)
+ count_dags_in_graph(global, graph);
+
+ /* calculate the patterns of this graph */
+ stat_calc_pattern_history(graph->irg);
+
+ /* leaf function did not call others */
+ if (graph->is_leaf)
+ graph->is_leaf_call = LCS_NON_LEAF_CALL;
+ else if (graph->is_leaf_call == LCS_UNKNOWN) {
+ /* we still don't know if this graph calls leaf-functions, so enqueue */
+ pdeq_putl(status->wait_q, graph);
+ }
+
+ /* we have analyzed this graph */
+ graph->is_analyzed = 1;
}
/**
- * finishes the simple dumper
+ * Called for every graph that was on the wait_q in stat_dump_snapshot()
+ * must finish all statistic info calculations.
+ *
+ * @param global The global entry
+ * @param graph The current entry
*/
-static void csv_finish(dumper_t *dmp)
+static void update_graph_stat_2(graph_entry_t *global, graph_entry_t *graph)
{
- fclose(dmp->f);
- dmp->f = NULL;
+ if (graph->is_deleted) {
+ /* deleted, ignore */
+ return;
+ }
+
+ if (graph->irg) {
+ /* count the nodes in the graph */
+ irg_walk_graph(graph->irg, update_node_stat_2, NULL, graph);
+
+ if (graph->is_leaf_call == LCS_UNKNOWN)
+ graph->is_leaf_call = LCS_LEAF_CALL;
+ }
}
/**
- * the simple human readable dumper
+ * register a dumper
*/
-static dumper_t csv_dumper = {
- csv_dump_graph,
- csv_init,
- csv_finish,
- NULL,
- NULL,
-};
+static void stat_register_dumper(const dumper_t *dumper)
+{
+ dumper_t *p = xmalloc(sizeof(*p));
+ if (p) {
+ *p = *dumper;
-/* ---------------------------------------------------------------------- */
+ p->next = status->dumper;
+ p->status = status;
+ status->dumper = p;
+ }
-/*
- * helper: get an ir_op from an opcode
- */
-ir_op *stat_get_op_from_opcode(opcode code)
-{
- return opcode_find_entry(code, status->ir_op_hash);
+ /* FIXME: memory leak */
}
-/* initialize the statistics module. */
-void init_stat(unsigned enable_options)
+/**
+ * dumps an IR graph.
+ */
+static void stat_dump_graph(graph_entry_t *entry)
{
-#define X(a) a, sizeof(a)-1
-
- int pseudo_id = 0;
+ dumper_t *dumper;
- /* enable statistics */
- status->enable = enable_options & FIRMSTAT_ENABLED;
+ for (dumper = status->dumper; dumper; dumper = dumper->next) {
+ if (dumper->dump_graph)
+ dumper->dump_graph(dumper, entry);
+ }
+}
- if (! status->enable)
- return;
+/**
+ * dumps a constant table
+ */
+static void stat_dump_consts(const constant_info_t *tbl)
+{
+ dumper_t *dumper;
- obstack_init(&status->cnts);
+ for (dumper = status->dumper; dumper; dumper = dumper->next) {
+ if (dumper->dump_const_tbl)
+ dumper->dump_const_tbl(dumper, tbl);
+ }
+}
- /* build the pseudo-ops */
- _op_Phi0.code = --pseudo_id;
- _op_Phi0.name = new_id_from_chars(X("Phi0"));
+/**
+ * initialize the dumper
+ */
+static void stat_dump_init(const char *name)
+{
+ dumper_t *dumper;
- _op_PhiM.code = --pseudo_id;
- _op_PhiM.name = new_id_from_chars(X("PhiM"));
+ for (dumper = status->dumper; dumper; dumper = dumper->next) {
+ if (dumper->init)
+ dumper->init(dumper, name);
+ }
+}
- /* create the hash-tables */
- status->irg_hash = new_pset(graph_cmp, 8);
- status->ir_op_hash = new_pset(opcode_cmp_2, 1);
+/**
+ * finish the dumper
+ */
+static void stat_dump_finish(void)
+{
+ dumper_t *dumper;
- status->op_Phi0 = &_op_Phi0;
- status->op_PhiM = &_op_PhiM;
+ for (dumper = status->dumper; dumper; dumper = dumper->next) {
+ if (dumper->finish)
+ dumper->finish(dumper);
+ }
+}
- stat_register_dumper(&simple_dumper);
- stat_register_dumper(&csv_dumper);
+/* ---------------------------------------------------------------------- */
- /* initialize the pattern hash */
- stat_init_pattern_history(enable_options & FIRMSTAT_PATTERN_ENABLED);
-#undef X
+/*
+ * helper: get an ir_op from an opcode
+ */
+ir_op *stat_get_op_from_opcode(opcode code)
+{
+ return opcode_find_entry(code, status->ir_op_hash);
}
-/* A new IR op is registered. */
-void stat_new_ir_op(const ir_op *op)
+/**
+ * A new IR op is registered.
+ *
+ * @param ctx the hook context
+ * @param op the new IR opcode that was created.
+ */
+static void stat_new_ir_op(void *ctx, ir_op *op)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
STAT_LEAVE;
}
-/* An IR op is freed. */
-void stat_free_ir_op(const ir_op *op)
+/**
+ * An IR op is freed.
+ *
+ * @param ctx the hook context
+ * @param op the IR opcode that is freed
+ */
+static void stat_free_ir_op(void *ctx, ir_op *op)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
STAT_LEAVE;
}
-/* A new node is created. */
-void stat_new_node(const ir_node *node)
+/**
+ * A new node is created.
+ *
+ * @param ctx the hook context
+ * @param irg the IR graph on which the node is created
+ * @param node the new IR node that was created
+ */
+static void stat_new_node(void *ctx, ir_graph *irg, ir_node *node)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
/* do NOT count during dead node elimination */
STAT_LEAVE;
}
-/* A node is changed into a Id node */
-void stat_turn_into_id(const ir_node *node)
+/**
+ * A node is changed into a Id node
+ *
+ * @param ctx the hook context
+ * @param node the IR node that will be turned into an ID
+ */
+static void stat_turn_into_id(void *ctx, ir_node *node)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
STAT_LEAVE;
}
-/* A new graph was created */
-void stat_new_graph(ir_graph *irg, entity *ent)
+/**
+ * A new graph was created
+ *
+ * @param ctx the hook context
+ * @param irg the new IR graph that was created
+ * @param ent the entity of this graph
+ */
+static void stat_new_graph(void *ctx, ir_graph *irg, entity *ent)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
/* execute for side effect :-) */
graph_entry_t * graph = graph_get_entry(irg, status->irg_hash);
- graph->ent = ent;
- graph->deleted = 0;
+ graph->ent = ent;
+ graph->is_deleted = 0;
+ graph->is_leaf = 0;
+ graph->is_leaf_call = 0;
+ graph->is_recursive = 0;
+ graph->is_chain_call = 0;
+ graph->is_analyzed = 0;
}
STAT_LEAVE;
}
-/*
- * A graph was deleted
+/**
+ * A graph will be deleted
+ *
+ * @param ctx the hook context
+ * @param irg the IR graph that will be deleted
+ *
+ * Note that we still hold the information for this graph
+ * in our hash maps, only a flag is set which prevents this
+ * information from being changed, it's "frozen" from now.
*/
-void stat_free_graph(ir_graph *irg)
+static void stat_free_graph(void *ctx, ir_graph *irg)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
graph_entry_t *global = graph_get_entry(NULL, status->irg_hash);
- graph->deleted = 1;
-
- /* count the nodes of the graph yet, it will be destroyed later */
- count_nodes_in_graph(global, graph);
-
- /* count the DAG's */
-// count_dags_in_graph(global, graph);
+ graph->is_deleted = 1;
- /* calculate the pattern */
- stat_calc_pattern_history(irg);
+ if (status->stat_options & FIRMSTAT_COUNT_DELETED) {
+ /* count the nodes of the graph yet, it will be destroyed later */
+ update_graph_stat(global, graph);
+ }
}
STAT_LEAVE;
}
-/*
+/**
* A walk over a graph is initiated. Do not count walks from statistic code.
+ *
+ * @param ctx the hook context
+ * @param irg the IR graph that will be walked
+ * @param pre the pre walker
+ * @param post the post walker
*/
-void stat_irg_walk(ir_graph *irg, void *pre, void *post)
+static void stat_irg_walk(void *ctx, ir_graph *irg, generic_func *pre, generic_func *post)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER_SINGLE;
STAT_LEAVE;
}
-/*
+/**
* A walk over a graph in block-wise order is initiated. Do not count walks from statistic code.
+ *
+ * @param ctx the hook context
+ * @param irg the IR graph that will be walked
+ * @param pre the pre walker
+ * @param post the post walker
*/
-void stat_irg_walk_blkwise(ir_graph *irg, void *pre, void *post)
+static void stat_irg_walk_blkwise(void *ctx, ir_graph *irg, generic_func *pre, generic_func *post)
{
/* for now, do NOT differentiate between blockwise and normal */
- stat_irg_walk(irg, pre, post);
+ stat_irg_walk(ctx, irg, pre, post);
}
-/*
+/**
* A walk over the graph's blocks is initiated. Do not count walks from statistic code.
+ *
+ * @param ctx the hook context
+ * @param irg the IR graph that will be walked
+ * @param node the IR node
+ * @param pre the pre walker
+ * @param post the post walker
*/
-void stat_irg_block_walk(ir_graph *irg, const ir_node *node, void *pre, void *post)
+static void stat_irg_block_walk(void *ctx, ir_graph *irg, ir_node *node, generic_func *pre, generic_func *post)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER_SINGLE;
}
/**
- * called for every node that is removed due to an optimization
+ * called for every node that is removed due to an optimization.
+ *
+ * @param n the IR node that will be removed
+ * @param hmap the hash map containing ir_op* -> opt_entry_t*
*/
-static void removed_due_opt(ir_node *n, pset *set)
+static void removed_due_opt(ir_node *n, hmap_opt_entry_t *hmap)
{
- ir_op *op = get_irn_op(n);
- opt_entry_t *entry = opt_get_entry(op, set);
+ ir_op *op = stat_get_irn_op(n);
+ opt_entry_t *entry = opt_get_entry(op, hmap);
/* increase global value */
cnt_inc(&entry->count);
}
-/*
- * Some nodes were optimized into some others due to an optimization
+/**
+ * Some nodes were optimized into some others due to an optimization.
+ *
+ * @param ctx the hook context
*/
-void stat_merge_nodes(
+static void stat_merge_nodes(
+ void *ctx,
ir_node **new_node_array, int new_num_entries,
ir_node **old_node_array, int old_num_entries,
- stat_opt_kind opt)
+ hook_opt_kind opt)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
int i, j;
graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash);
+ if (status->reassoc_run)
+ opt = HOOK_OPT_REASSOC;
+
for (i = 0; i < old_num_entries; ++i) {
for (j = 0; j < new_num_entries; ++j)
if (old_node_array[i] == new_node_array[j])
/* nodes might be in new and old, these are NOT removed */
if (j >= new_num_entries) {
- removed_due_opt(old_node_array[i], graph->opt_hash[opt]);
+ int xopt = opt;
+
+ /* sometimes we did not detect, that it is replaced by a Const */
+ if (opt == HOOK_OPT_CONFIRM && new_num_entries == 1) {
+ ir_op *op = get_irn_op(new_node_array[0]);
+
+ if (op == op_Const || op == op_SymConst)
+ xopt = HOOK_OPT_CONFIRM_C;
+ }
+
+ removed_due_opt(old_node_array[i], graph->opt_hash[xopt]);
}
}
}
STAT_LEAVE;
}
-/*
+/**
+ * Reassociation is started/stopped.
+ *
+ * @param ctx the hook context
+ * @param flag if non-zero, reassociation is started else stopped
+ */
+static void stat_reassociate(void *ctx, int flag)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ status->reassoc_run = flag;
+ }
+ STAT_LEAVE;
+}
+
+/**
* A node was lowered into other nodes
+ *
+ * @param ctx the hook context
+ * @param node the IR node that will be lowered
*/
-void stat_lower(ir_node *node)
+static void stat_lower(void *ctx, ir_node *node)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
{
graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash);
- removed_due_opt(node, graph->opt_hash[STAT_LOWERED]);
+ removed_due_opt(node, graph->opt_hash[HOOK_LOWERED]);
}
STAT_LEAVE;
}
-/*
- * A graph was inlined
+/**
+ * A graph was inlined.
+ *
+ * @param ctx the hook context
+ * @param call       the IR call that will be changed into the body of
+ * the called IR graph
+ * @param called_irg the IR graph representing the called routine
*/
-void stat_inline(ir_node *call, ir_graph *called_irg)
+static void stat_inline(void *ctx, ir_node *call, ir_graph *called_irg)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
STAT_LEAVE;
}
-/*
+/**
* A graph with tail-recursions was optimized.
+ *
+ * @param ctx the hook context
*/
-void stat_tail_rec(ir_graph *irg)
+static void stat_tail_rec(void *ctx, ir_graph *irg, int n_calls)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
{
+ graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
+
+ graph->num_tail_recursion += n_calls;
}
STAT_LEAVE;
}
-/*
+/**
* Strength reduction was performed on an iteration variable.
+ *
+ * @param ctx the hook context
*/
-void stat_strength_red(ir_graph *irg, ir_node *strong, ir_node *cmp)
+static void stat_strength_red(void *ctx, ir_graph *irg, ir_node *strong, ir_node *cmp)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
STAT_ENTER;
graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
cnt_inc(&graph->cnt_strength_red);
- removed_due_opt(strong, graph->opt_hash[STAT_OPT_STRENGTH_RED]);
+ removed_due_opt(strong, graph->opt_hash[HOOK_OPT_STRENGTH_RED]);
}
STAT_LEAVE;
}
-/*
- * Start the dead node elimination.
+/**
+ * Start/Stop the dead node elimination.
+ *
+ * @param ctx the hook context
+ */
+static void stat_dead_node_elim(void *ctx, ir_graph *irg, int start)
+{
+ if (! status->stat_options)
+ return;
+
+ if (start)
+ ++status->in_dead_node_elim;
+ else
+ --status->in_dead_node_elim;
+}
+
+/**
+ * if-conversion was tried
+ */
+static void stat_if_conversion(void *context, ir_graph *irg, ir_node *phi,
+ int pos, ir_node *mux, if_result_t reason)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
+
+ cnt_inc(&graph->cnt_if_conv[reason]);
+ }
+ STAT_LEAVE;
+}
+
+/**
+ * real function call was optimized
+ */
+static void stat_func_call(void *context, ir_graph *irg, ir_node *call)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
+
+ cnt_inc(&graph->cnt_real_func_call);
+ }
+ STAT_LEAVE;
+}
+
+/**
+ * A multiply was replaced by a series of Shifts/Adds/Subs
+ *
+ * @param ctx the hook context
+ */
+static void stat_arch_dep_replace_mul_with_shifts(void *ctx, ir_node *mul)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash);
+ removed_due_opt(mul, graph->opt_hash[HOOK_OPT_ARCH_DEP]);
+ }
+ STAT_LEAVE;
+}
+
+/**
+ * A division by const was replaced
+ *
+ * @param ctx the hook context
+ * @param node the division node that will be optimized
*/
-void stat_dead_node_elim_start(ir_graph *irg)
+static void stat_arch_dep_replace_division_by_const(void *ctx, ir_node *node)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
- ++status->in_dead_node_elim;
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash);
+ removed_due_opt(node, graph->opt_hash[HOOK_OPT_ARCH_DEP]);
+ }
+ STAT_LEAVE;
}
/*
- * Stops the dead node elimination.
+ * Update the register pressure of a block
+ *
+ * @param irg the irg containing the block
+ * @param block the block for which the reg pressure should be set
+ * @param pressure the pressure
+ * @param class_name the name of the register class
+ */
+void stat_be_block_regpressure(ir_graph *irg, ir_node *block, int pressure, const char *class_name)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
+ be_block_entry_t *block_ent;
+ reg_pressure_entry_t *rp_ent;
+
+ block_ent = be_block_get_entry(&status->be_data, get_irn_node_nr(block), graph->be_block_hash);
+ rp_ent = obstack_alloc(&status->be_data, sizeof(*rp_ent));
+ memset(rp_ent, 0, sizeof(*rp_ent));
+
+ rp_ent->class_name = class_name;
+ rp_ent->pressure = pressure;
+
+ pset_insert(block_ent->reg_pressure, rp_ent, HASH_PTR(class_name));
+ }
+ STAT_LEAVE;
+}
+
+/**
+ * Update the distribution of ready nodes of a block
+ *
+ * @param irg the irg containing the block
+ * @param block the block for which the reg pressure should be set
+ * @param num_ready the number of ready nodes
+ */
+void stat_be_block_sched_ready(ir_graph *irg, ir_node *block, int num_ready)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(irg, status->irg_hash);
+ be_block_entry_t *block_ent;
+
+ block_ent = be_block_get_entry(&status->be_data, get_irn_node_nr(block), graph->be_block_hash);
+
+ /* increase the counter of corresponding number of ready nodes */
+ stat_inc_int_distrib_tbl(block_ent->sched_ready, num_ready);
+ }
+ STAT_LEAVE;
+}
+
+/**
+ * Update the permutation statistic of a block
+ *
+ * @param class_name the name of the register class
+ * @param n_regs number of registers in the register class
+ * @param perm the perm node
+ * @param block the block containing the perm
+ * @param size the size of the perm
+ * @param real_size number of pairs with different registers
*/
-void stat_dead_node_elim_stop(ir_graph *irg)
+void stat_be_block_stat_perm(const char *class_name, int n_regs, ir_node *perm, ir_node *block,
+ int size, int real_size)
{
- if (! status->enable)
+ if (! status->stat_options)
return;
- --status->in_dead_node_elim;
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(get_irn_irg(block), status->irg_hash);
+ be_block_entry_t *block_ent;
+ perm_class_entry_t *pc_ent;
+ perm_stat_entry_t *ps_ent;
+
+ block_ent = be_block_get_entry(&status->be_data, get_irn_node_nr(block), graph->be_block_hash);
+ pc_ent = perm_class_get_entry(&status->be_data, class_name, block_ent->perm_class_stat);
+ ps_ent = perm_stat_get_entry(&status->be_data, perm, pc_ent->perm_stat);
+
+ pc_ent->n_regs = n_regs;
+
+ /* update information */
+ ps_ent->size = size;
+ ps_ent->real_size = real_size;
+ }
+ STAT_LEAVE;
+}
+
+/**
+ * Update the permutation statistic of a single perm
+ *
+ * @param class_name the name of the register class
+ * @param perm the perm node
+ * @param block the block containing the perm
+ * @param is_chain 1 if chain, 0 if cycle
+ * @param size length of the cycle/chain
+ * @param n_ops the number of ops representing this cycle/chain after lowering
+ */
+void stat_be_block_stat_permcycle(const char *class_name, ir_node *perm, ir_node *block,
+ int is_chain, int size, int n_ops)
+{
+ if (! status->stat_options)
+ return;
+
+ STAT_ENTER;
+ {
+ graph_entry_t *graph = graph_get_entry(get_irn_irg(block), status->irg_hash);
+ be_block_entry_t *block_ent;
+ perm_class_entry_t *pc_ent;
+ perm_stat_entry_t *ps_ent;
+
+ block_ent = be_block_get_entry(&status->be_data, get_irn_node_nr(block), graph->be_block_hash);
+ pc_ent = perm_class_get_entry(&status->be_data, class_name, block_ent->perm_class_stat);
+ ps_ent = perm_stat_get_entry(&status->be_data, perm, pc_ent->perm_stat);
+
+ if (is_chain) {
+ ps_ent->n_copies += n_ops;
+ stat_inc_int_distrib_tbl(ps_ent->chains, size);
+ }
+ else {
+ ps_ent->n_exchg += n_ops;
+ stat_inc_int_distrib_tbl(ps_ent->cycles, size);
+ }
+ }
+ STAT_LEAVE;
}
-/* Finish the statistics */
-void stat_finish(const char *name)
+/* Dumps a statistics snapshot */
+void stat_dump_snapshot(const char *name, const char *phase)
{
- if (! status->enable)
+ char fname[2048];
+ const char *p;
+ int l;
+
+ if (! status->stat_options)
return;
STAT_ENTER;
graph_entry_t *entry;
graph_entry_t *global = graph_get_entry(NULL, status->irg_hash);
- dump_init(name);
+ /*
+ * The constant counter is only global, so we clear it here.
+ * Note that it does NOT contain the constants in DELETED
+ * graphs due to this.
+ */
+ if (status->stat_options & FIRMSTAT_COUNT_CONSTS)
+ stat_const_clear(status);
+
+ /* build the name */
+ p = strrchr(name, '/');
+#ifdef _WIN32
+ {
+ const char *q;
- /* dump per graph */
+ q = strrchr(name, '\\');
+
+ /* NULL might be not the smallest pointer */
+ if (q && (!p || q > p))
+ p = q;
+ }
+#endif
+ if (p) {
+ ++p;
+ l = p - name;
+
+ if (l > sizeof(fname) - 1)
+ l = sizeof(fname) - 1;
+
+ memcpy(fname, name, l);
+ fname[l] = '\0';
+ }
+ else {
+ fname[0] = '\0';
+ p = name;
+ }
+	/* strncat's size argument is the REMAINING space in the buffer,
+	 * not its total size: recompute it before every append so a long
+	 * name/phase cannot overflow fname. */
+	{
+		size_t rem;
+
+		rem = sizeof(fname) - strlen(fname) - 1;
+		strncat(fname, "firmstat-", rem);
+		rem = sizeof(fname) - strlen(fname) - 1;
+		strncat(fname, phase, rem);
+		rem = sizeof(fname) - strlen(fname) - 1;
+		strncat(fname, "-", rem);
+		rem = sizeof(fname) - strlen(fname) - 1;
+		strncat(fname, p, rem);
+	}
+
+ stat_dump_init(fname);
+
+ /* calculate the graph statistics */
for (entry = pset_first(status->irg_hash); entry; entry = pset_next(status->irg_hash)) {
if (entry->irg == NULL) {
continue;
}
- if (! entry->deleted) {
+ if (! entry->is_deleted) {
/* the graph is still alive, count the nodes on it */
- count_nodes_in_graph(global, entry);
+ update_graph_stat(global, entry);
+ }
+ }
- /* count the DAG's */
-// count_dags_in_graph(global, entry);
+ /* some calculations are dependent, we pushed them on the wait_q */
+ while (! pdeq_empty(status->wait_q)) {
+ entry = pdeq_getr(status->wait_q);
+
+ update_graph_stat_2(global, entry);
+ }
+
+
+ /* dump per graph */
+ for (entry = pset_first(status->irg_hash); entry; entry = pset_next(status->irg_hash)) {
- /* calculate the pattern */
- stat_calc_pattern_history(entry->irg);
+ if (entry->irg == NULL) {
+ /* special entry for the global count */
+ continue;
}
- dump_graph(entry);
+ if (! entry->is_deleted || status->stat_options & FIRMSTAT_COUNT_DELETED) {
+ stat_dump_graph(entry);
+ }
- /* clear the counter here:
- * we need only the edge counter to be cleared, all others are cumulative
- */
- cnt_clr(&entry->cnt_edges);
+ if (! entry->is_deleted) {
+ /* clear the counter that are not accumulated */
+ graph_clear_entry(entry, 0);
+ }
}
/* dump global */
-// dump_graph(global);
- dump_finish();
+ stat_dump_graph(global);
+
+ /* dump the const info */
+ if (status->stat_options & FIRMSTAT_COUNT_CONSTS)
+ stat_dump_consts(&status->const_info);
+
+ stat_dump_finish();
stat_finish_pattern_history();
for (entry = pset_first(global->opcode_hash); entry; entry = pset_next(global->opcode_hash)) {
opcode_clear_entry(entry);
}
- graph_clear_entry(global);
+ /* clear all global counter */
+ graph_clear_entry(global, 1);
}
-
- /* finished */
-// status->enable = 0;
}
STAT_LEAVE;
}
-#else
+/** the hook entries for the Firm statistics module */
+static hook_entry_t stat_hooks[hook_last];
-/* need this for prototypes */
-#define FIRM_STATISTICS
-#include "firmstat.h"
+/* initialize the statistics module. */
+void firm_init_stat(unsigned enable_options)
+{
+#define X(a) a, sizeof(a)-1
+#define HOOK(h, fkt) \
+ stat_hooks[h].hook._##h = fkt; register_hook(h, &stat_hooks[h])
+ unsigned num = 0;
-void init_stat(unsigned enable_options) {}
+ if (! (enable_options & FIRMSTAT_ENABLED))
+ return;
-void stat_finish(const char *name) {}
+ status = xmalloc(sizeof(*status));
+ memset(status, 0, sizeof(*status));
-void stat_new_ir_op(const ir_op *op) {}
+ /* enable statistics */
+ status->stat_options = enable_options & FIRMSTAT_ENABLED ? enable_options : 0;
+
+ /* register all hooks */
+ HOOK(hook_new_ir_op, stat_new_ir_op);
+ HOOK(hook_free_ir_op, stat_free_ir_op);
+ HOOK(hook_new_node, stat_new_node);
+ HOOK(hook_turn_into_id, stat_turn_into_id);
+ HOOK(hook_new_graph, stat_new_graph);
+ HOOK(hook_free_graph, stat_free_graph);
+ HOOK(hook_irg_walk, stat_irg_walk);
+ HOOK(hook_irg_walk_blkwise, stat_irg_walk_blkwise);
+ HOOK(hook_irg_block_walk, stat_irg_block_walk);
+ HOOK(hook_merge_nodes, stat_merge_nodes);
+ HOOK(hook_reassociate, stat_reassociate);
+ HOOK(hook_lower, stat_lower);
+ HOOK(hook_inline, stat_inline);
+ HOOK(hook_tail_rec, stat_tail_rec);
+ HOOK(hook_strength_red, stat_strength_red);
+ HOOK(hook_dead_node_elim, stat_dead_node_elim);
+ HOOK(hook_if_conversion, stat_if_conversion);
+ HOOK(hook_func_call, stat_func_call);
+ HOOK(hook_arch_dep_replace_mul_with_shifts, stat_arch_dep_replace_mul_with_shifts);
+ HOOK(hook_arch_dep_replace_division_by_const, stat_arch_dep_replace_division_by_const);
-void stat_free_ir_op(const ir_op *op) {}
+ obstack_init(&status->cnts);
+ obstack_init(&status->be_data);
-void stat_new_node(const ir_node *node) {}
+ /* create the hash-tables */
+ status->irg_hash = new_pset(graph_cmp, 8);
+ status->ir_op_hash = new_pset(opcode_cmp_2, 1);
-void stat_turn_into_id(const ir_node *node) {}
+ /* create the wait queue */
+ status->wait_q = new_pdeq();
-void stat_new_graph(ir_graph *irg, entity *ent) {}
+ if (enable_options & FIRMSTAT_COUNT_STRONG_OP) {
+ /* build the pseudo-ops */
-void stat_free_graph(ir_graph *irg) {}
+ _op_Phi0.code = --num;
+ _op_Phi0.name = new_id_from_chars(X("Phi0"));
-void stat_irg_walk(ir_graph *irg, void *pre, void *post) {}
+ _op_PhiM.code = --num;
+ _op_PhiM.name = new_id_from_chars(X("PhiM"));
-void stat_irg_block_walk(ir_graph *irg, const ir_node *node, void *pre, void *post) {}
+ _op_ProjM.code = --num;
+ _op_ProjM.name = new_id_from_chars(X("ProjM"));
-void stat_merge_nodes(
- ir_node **new_node_array, int new_num_entries,
- ir_node **old_node_array, int old_num_entries,
- stat_opt_kind opt) {}
+ _op_MulC.code = --num;
+ _op_MulC.name = new_id_from_chars(X("MulC"));
-void stat_lower(ir_node *node) {}
+ _op_DivC.code = --num;
+ _op_DivC.name = new_id_from_chars(X("DivC"));
-void stat_inline(ir_node *call, ir_graph *irg) {}
+ _op_ModC.code = --num;
+ _op_ModC.name = new_id_from_chars(X("ModC"));
-void stat_tail_rec(ir_graph *irg) {}
+ _op_DivModC.code = --num;
+ _op_DivModC.name = new_id_from_chars(X("DivModC"));
-void stat_strength_red(ir_graph *irg, ir_node *strong, ir_node *cmp) {}
+ status->op_Phi0 = &_op_Phi0;
+ status->op_PhiM = &_op_PhiM;
+ status->op_ProjM = &_op_ProjM;
+ status->op_MulC = &_op_MulC;
+ status->op_DivC = &_op_DivC;
+ status->op_ModC = &_op_ModC;
+ status->op_DivModC = &_op_DivModC;
+ }
+ else {
+ status->op_Phi0 = NULL;
+ status->op_PhiM = NULL;
+ status->op_ProjM = NULL;
+ status->op_MulC = NULL;
+ status->op_DivC = NULL;
+ status->op_ModC = NULL;
+ status->op_DivModC = NULL;
+ }
-void stat_dead_node_elim_start(ir_graph *irg) {}
+ if (enable_options & FIRMSTAT_COUNT_SELS) {
+ _op_SelSel.code = --num;
+ _op_SelSel.name = new_id_from_chars(X("Sel(Sel)"));
-void stat_dead_node_elim_stop(ir_graph *irg) {}
+ _op_SelSelSel.code = --num;
+ _op_SelSelSel.name = new_id_from_chars(X("Sel(Sel(Sel))"));
-#endif
+ status->op_SelSel = &_op_SelSel;
+ status->op_SelSelSel = &_op_SelSelSel;
+ }
+ else {
+ status->op_SelSel = NULL;
+ status->op_SelSelSel = NULL;
+ }
+
+ /* register the dumper */
+ stat_register_dumper(&simple_dumper);
+
+ if (enable_options & FIRMSTAT_CSV_OUTPUT)
+ stat_register_dumper(&csv_dumper);
+
+ /* initialize the pattern hash */
+ stat_init_pattern_history(enable_options & FIRMSTAT_PATTERN_ENABLED);
+
+ /* initialize the Const options */
+ if (enable_options & FIRMSTAT_COUNT_CONSTS)
+ stat_init_const_cnt(status);
+
+#undef HOOK
+#undef X
+}
+
+/* terminates the statistics module, frees all memory */
+void stat_term(void) {
+ if (status != (stat_info_t *)&status_disable) {
+ obstack_free(&status->be_data, NULL);
+ obstack_free(&status->cnts, NULL);
+ xfree(status);
+ status = (stat_info_t *)&status_disable;
+ }
+}
+
+/* returns 1 if statistics were initialized, 0 otherwise */
+int stat_is_active(void) {
+ return status != (stat_info_t *)&status_disable;
+}
+
+#else
+
+/* initialize the statistics module. */
+void firm_init_stat(unsigned enable_options) {}
+
+/* Dumps a statistics snapshot */
+void stat_dump_snapshot(const char *name, const char *phase) {}
+
+/* terminates the statistics module, frees all memory.
+ * Must be an (empty) definition like the other stubs -- a bare
+ * prototype here leaves stat_term undefined when statistics are
+ * disabled and breaks linking. */
+void stat_term(void) {}
+
+#endif /* FIRM_STATISTICS */