X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fstat%2Ffirmstat.c;h=1bac51828a949465d5d7fc3a91032a21032174ba;hb=184602875611495e608c27263d0a5a8f24078a70;hp=7b68085c59dceef7ef5da2feb3fe968baffde6c6;hpb=4bd3b5671a4d424570183fe1bf44c4380771f2b5;p=libfirm diff --git a/ir/stat/firmstat.c b/ir/stat/firmstat.c index 7b68085c5..1bac51828 100644 --- a/ir/stat/firmstat.c +++ b/ir/stat/firmstat.c @@ -13,160 +13,64 @@ # include "config.h" #endif -# include - -#include -#include - #ifdef FIRM_STATISTICS -#include "firmstat.h" -# include "irop_t.h" -# include "irnode_t.h" -# include "irgraph_t.h" -# include "pset.h" -# include "irprog.h" -# include "irgwalk.h" -# include "pattern.h" -# include "counter.h" - -/* - * just be make some things clear :-), the - * poor man "generics" - */ -#define HASH_MAP(type) hmap_##type +#include -typedef pset hmap_node_entry_t; -typedef pset hmap_graph_entry_t; -typedef pset hmap_opt_entry_t; -typedef pset hmap_block_entry_t; -typedef pset hmap_ir_op; +#ifdef HAVE_STDLIB_H +# include +#endif +#ifdef HAVE_STRING_H +# include +#endif -/* - * An entry for ir_nodes, used in ir_graph statistics. - */ -typedef struct _node_entry_t { - counter_t cnt_alive; /**< amount of nodes in this entry */ - counter_t new_node; /**< amount of new nodes for this entry */ - counter_t into_Id; /**< amount of nodes that turned into Id's for this entry */ - const ir_op *op; /**< the op for this entry */ -} node_entry_t; +#include "irouts.h" +#include "irdump.h" +#include "hashptr.h" +#include "firmstat_t.h" +#include "pattern.h" +#include "dags.h" +#include "stat_dmp.h" +#include "xmalloc.h" +#include "irhooks.h" /* - * An entry for ir_graphs - */ -typedef struct _graph_entry_t { - HASH_MAP(node_entry_t) *opcode_hash; /**< hash map containing the opcode counter */ - HASH_MAP(block_entry_t) *block_hash; /**< hash map countaining the block counter */ - counter_t cnt_walked; /**< walker walked over the graph */ - counter_t cnt_walked_blocks; /**< walker walked over the graph blocks */ - counter_t cnt_was_inlined; /**< number of times other graph were inlined */ - counter_t cnt_got_inlined; /**< number of times this graph was inlined */ - counter_t cnt_edges; /**< number of DF edges in this graph */ - HASH_MAP(opt_entry_t) *opt_hash[STAT_OPT_MAX]; /**< hash maps containing opcode counter for optimizations */ - ir_graph *irg; /**< the graph of this object */ - entity *ent; /**< the entity of this graph if one exists */ - int deleted; /**< set if this irg was deleted */ -} graph_entry_t; - -/** - * An entry for optimized ir_nodes + * need this to be static: + * Special pseudo Opcodes that we need to count some interesting cases */ -typedef struct _opt_entry_t { - counter_t count; /**< optimization counter */ - const ir_op *op; /**< the op for this entry */ -} opt_entry_t; /** - * An entry for a block in a ir-graph + * The Phi0, a node that is created during SSA construction */ -typedef struct _block_entry_t { - counter_t cnt_nodes; /**< the counter of nodes in this block */ - counter_t cnt_edges; /**< the counter of edges in this block */ - counter_t cnt_in_edges; /**< the counter of edges incoming from other blocks to this block */ - counter_t cnt_out_edges; /**< the counter of edges outgoing from this block to other blocks */ - long block_nr; /**< block nr */ -} block_entry_t; +static ir_op _op_Phi0; -/** forward */ -typedef struct _dumper_t dumper_t; +/** The PhiM, just to count memory Phi's. 
*/ +static ir_op _op_PhiM; -/** - * handler for dumping an IRG - * - * @param dmp the dumper - * @param entry the IR-graph hash map entry - */ -typedef void (*dump_graph_FUNC)(dumper_t *dmp, graph_entry_t *entry); +/** The Mul by Const node. */ +static ir_op _op_MulC; -/** - * handler for dumper init - * - * @param dmp the dumper - * @param name name of the file to dump to - */ -typedef void (*dump_init_FUNC)(dumper_t *dmp, const char *name); +/** The Div by Const node. */ +static ir_op _op_DivC; -/** - * handler for dumper finish - * - * @param dmp the dumper - */ -typedef void (*dump_finish_FUNC)(dumper_t *dmp); +/** The Div by Const node. */ +static ir_op _op_ModC; +/** The Div by Const node. */ +static ir_op _op_DivModC; -/** - * a dumper description - */ -struct _dumper_t { - dump_graph_FUNC dump_graph; /**< handler for dumping an irg */ - dump_init_FUNC init; /**< handler for init */ - dump_finish_FUNC finish; /**< handler for finish */ - FILE *f; /**< the file to dump to */ - dumper_t *next; /**< link to the next dumper */ -}; - -/** - * statistics info - */ -typedef struct _statistic_info_t { - struct obstack cnts; /**< obstack containing the counters */ - HASH_MAP(graph_entry_t) *irg_hash; /**< hash map containing the counter for irgs */ - HASH_MAP(ir_op) *ir_op_hash; /**< hash map containing all ir_ops (accessible by op_codes) */ - int recursive; /**< flag for detecting recursive hook calls */ - int in_dead_node_elim; /**< set, if dead node elimination runs */ - ir_op *op_Phi0; /**< needed pseudo op */ - ir_op *op_PhiM; /**< needed pseudo op */ - dumper_t *dumper; /**< list of dumper */ - int enable; /**< if set, statistic is enabled */ -} stat_info_t; - -/** - * names of the optimizations - */ -static const char *opt_names[] = { - "straightening optimization", - "if simplification", - "algebraic simplification", - "Phi optmization", - "Write-After-Write optimization", - "Write-After-Read optimization", - "Read-After-Write optimization", - "Tuple optimization", - "ID optimization", - "Constant evaluation", - "Lowered", -}; - -/** - * need this to be static - */ -static ir_op _op_Phi0, _op_PhiM; +/** The memory Proj node. */ +static ir_op _op_ProjM; /* ---------------------------------------------------------------------------------- */ +/** Marks the begin of a statistic (hook) function. */ #define STAT_ENTER ++status->recursive + +/** Marks the end of a statistic (hook) functions. */ #define STAT_LEAVE --status->recursive + +/** Allows to enter a statistic function only when we are not already in a hook. 
*/ #define STAT_ENTER_SINGLE do { if (status->recursive > 0) return; ++status->recursive; } while (0) /** @@ -229,56 +133,94 @@ static int opcode_cmp_2(const void *elt, const void *key) return e1->code != e2->code; } +/** + * compare two elements of the address_mark set + */ +static int address_mark_cmp(const void *elt, const void *key, size_t size) +{ + const address_mark_entry_t *e1 = elt; + const address_mark_entry_t *e2 = key; + + /* compare only the nodes, the rest is used as data container */ + return e1->node != e2->node; +} + +/** + * clears all counter in a node_entry_t + */ +static void opcode_clear_entry(node_entry_t *elem) +{ + cnt_clr(&elem->cnt_alive); + cnt_clr(&elem->new_node); + cnt_clr(&elem->into_Id); +} + /** * Returns the associates node_entry_t for an ir_op + * + * @param op the IR operation + * @param hmap a hash map containing ir_op* -> node_entry_t* */ -static node_entry_t *opcode_get_entry(const ir_op *op, pset *set) +static node_entry_t *opcode_get_entry(const ir_op *op, hmap_node_entry_t *hmap) { node_entry_t key; node_entry_t *elem; key.op = op; - elem = pset_find(set, &key, op->code); + elem = pset_find(hmap, &key, op->code); if (elem) return elem; elem = obstack_alloc(&status->cnts, sizeof(*elem)); /* clear counter */ - cnt_clr(&elem->cnt_alive); - cnt_clr(&elem->new_node); - cnt_clr(&elem->into_Id); + opcode_clear_entry(elem); elem->op = op; - return pset_insert(set, elem, op->code); + return pset_insert(hmap, elem, op->code); } /** * Returns the associates ir_op for an opcode + * + * @param code the IR opcode + * @param hmap the hash map containing opcode -> ir_op* */ -static ir_op *opcode_find_entry(opcode code, pset *set) +static ir_op *opcode_find_entry(opcode code, hmap_ir_op *hmap) { ir_op key; key.code = code; - return pset_find(set, &key, code); + return pset_find(hmap, &key, code); } /** - * calculates a hash value for an irg - * Addresses are typically aligned at 32bit, so we ignore the lowest bits + * clears all counter in a graph_entry_t */ -static INLINE unsigned irg_hash(const ir_graph *irg) +static void graph_clear_entry(graph_entry_t *elem, int all) { - return (unsigned)irg >> 3; + if (all) { + cnt_clr(&elem->cnt_walked); + cnt_clr(&elem->cnt_walked_blocks); + cnt_clr(&elem->cnt_was_inlined); + cnt_clr(&elem->cnt_got_inlined); + cnt_clr(&elem->cnt_strength_red); + } + cnt_clr(&elem->cnt_edges); + cnt_clr(&elem->cnt_all_calls); + cnt_clr(&elem->cnt_call_with_cnst_arg); + cnt_clr(&elem->cnt_indirect_calls); } /** - * Returns the acssociates graph_entry_t for an irg + * Returns the associated graph_entry_t for an IR graph. 
+ * + * @param irg the IR graph + * @param hmap the hash map containing ir_graph* -> graph_entry_t* */ -static graph_entry_t *graph_get_entry(ir_graph *irg, pset *set) +static graph_entry_t *graph_get_entry(ir_graph *irg, hmap_graph_entry_t *hmap) { graph_entry_t key; graph_entry_t *elem; @@ -286,104 +228,149 @@ static graph_entry_t *graph_get_entry(ir_graph *irg, pset *set) key.irg = irg; - elem = pset_find(set, &key, irg_hash(irg)); + elem = pset_find(hmap, &key, HASH_PTR(irg)); if (elem) return elem; + /* allocate a new one */ elem = obstack_alloc(&status->cnts, sizeof(*elem)); - cnt_clr(&elem->cnt_walked); - cnt_clr(&elem->cnt_walked_blocks); - cnt_clr(&elem->cnt_got_inlined); - cnt_clr(&elem->cnt_was_inlined); - cnt_clr(&elem->cnt_edges); + /* clear counter */ + graph_clear_entry(elem, 1); /* new hash table for opcodes here */ elem->opcode_hash = new_pset(opcode_cmp, 5); elem->block_hash = new_pset(block_cmp, 5); + elem->address_mark = new_set(address_mark_cmp, 5); elem->irg = irg; for (i = 0; i < sizeof(elem->opt_hash)/sizeof(elem->opt_hash[0]); ++i) elem->opt_hash[i] = new_pset(opt_cmp, 4); - return pset_insert(set, elem, irg_hash(irg)); + return pset_insert(hmap, elem, HASH_PTR(irg)); } /** - * Returns the associates opt_entry_t for an ir_op + * clears all counter in an opt_entry_t */ -static opt_entry_t *opt_get_entry(const ir_op *op, pset *set) +static void opt_clear_entry(opt_entry_t *elem) +{ + cnt_clr(&elem->count); +} + +/** + * Returns the associated opt_entry_t for an IR operation. + * + * @param op the IR operation + * @param hmap the hash map containing ir_op* -> opt_entry_t* + */ +static opt_entry_t *opt_get_entry(const ir_op *op, hmap_opt_entry_t *hmap) { opt_entry_t key; opt_entry_t *elem; key.op = op; - elem = pset_find(set, &key, op->code); + elem = pset_find(hmap, &key, op->code); if (elem) return elem; elem = obstack_alloc(&status->cnts, sizeof(*elem)); /* clear new counter */ - cnt_clr(&elem->count); + opt_clear_entry(elem); elem->op = op; - return pset_insert(set, elem, op->code); + return pset_insert(hmap, elem, op->code); +} + +/** + * clears all counter in a block_entry_t + */ +static void block_clear_entry(block_entry_t *elem) +{ + cnt_clr(&elem->cnt_nodes); + cnt_clr(&elem->cnt_edges); + cnt_clr(&elem->cnt_in_edges); + cnt_clr(&elem->cnt_out_edges); } /** - * Returns the associates block_entry_t for an block + * Returns the associated block_entry_t for an block. + * + * @param block_nr an IR block number + * @param hmap a hash map containing long -> block_entry_t */ -static block_entry_t *block_get_entry(long block_nr, pset *set) +static block_entry_t *block_get_entry(long block_nr, hmap_block_entry_t *hmap) { block_entry_t key; block_entry_t *elem; key.block_nr = block_nr; - elem = pset_find(set, &key, block_nr); + elem = pset_find(hmap, &key, block_nr); if (elem) return elem; elem = obstack_alloc(&status->cnts, sizeof(*elem)); /* clear new counter */ - cnt_clr(&elem->cnt_nodes); - cnt_clr(&elem->cnt_edges); - cnt_clr(&elem->cnt_in_edges); - cnt_clr(&elem->cnt_out_edges); + block_clear_entry(elem); elem->block_nr = block_nr; - return pset_insert(set, elem, block_nr); + return pset_insert(hmap, elem, block_nr); } /** * Returns the ir_op for an IR-node, - * handles special cases and return pseudo op codes + * handles special cases and return pseudo op codes. 
+ * + * @param none an IR node */ -static ir_op *stat_get_irn_op(const ir_node *node) +static ir_op *stat_get_irn_op(ir_node *node) { ir_op *op = get_irn_op(node); - if (op->code == iro_Phi && get_irn_arity(node) == 0) { + if (op == op_Phi && get_irn_arity(node) == 0) { /* special case, a Phi0 node, count on extra counter */ - op = status->op_Phi0; + op = status->op_Phi0 ? status->op_Phi0 : op; } - else if (op->code == iro_Phi && get_irn_mode(node) == mode_M) { + else if (op == op_Phi && get_irn_mode(node) == mode_M) { /* special case, a Memory Phi node, count on extra counter */ - op = status->op_PhiM; + op = status->op_PhiM ? status->op_PhiM : op; + } + else if (op == op_Proj && get_irn_mode(node) == mode_M) { + /* special case, a Memory Proj node, count on extra counter */ + op = status->op_ProjM ? status->op_ProjM : op; + } + else if (op == op_Mul && + (get_irn_op(get_Mul_left(node)) == op_Const || get_irn_op(get_Mul_right(node)) == op_Const)) { + /* special case, a Multiply by a const, count on extra counter */ + op = status->op_MulC ? status->op_MulC : op; + } + else if (op == op_Div && get_irn_op(get_Div_right(node)) == op_Const) { + /* special case, a division by a const, count on extra counter */ + op = status->op_DivC ? status->op_DivC : op; + } + else if (op == op_Mod && get_irn_op(get_Mod_right(node)) == op_Const) { + /* special case, a module by a const, count on extra counter */ + op = status->op_ModC ? status->op_ModC : op; } + else if (op == op_DivMod && get_irn_op(get_DivMod_right(node)) == op_Const) { + /* special case, a division/modulo by a const, count on extra counter */ + op = status->op_DivModC ? status->op_DivModC : op; + } + return op; } /** * update the block counter */ -static void count_block_info(ir_node *node, graph_entry_t *graph) +static void undate_block_info(ir_node *node, graph_entry_t *graph) { ir_op *op = get_irn_op(node); ir_node *block; @@ -406,9 +393,6 @@ static void count_block_info(ir_node *node, graph_entry_t *graph) } return; } - else if (op == op_Call) { - // return; - } block = get_nodes_block(node); b_entry = block_get_entry(get_irn_node_nr(block), graph->block_hash); @@ -438,334 +422,435 @@ static void count_block_info(ir_node *node, graph_entry_t *graph) } } -/** - * walker for reachable nodes count - */ -static void count_nodes(ir_node *node, void *env) +/** calculates how many arguments of the call are const */ +static int cnt_const_args(ir_node *call) { - graph_entry_t *graph = env; - node_entry_t *entry; + int i, res = 0; + int n = get_Call_n_params(call); - ir_op *op = stat_get_irn_op(node); - int arity = get_irn_arity(node); - - entry = opcode_get_entry(op, graph->opcode_hash); - - cnt_inc(&entry->cnt_alive); - cnt_add_i(&graph->cnt_edges, arity); + for (i = 0; i < n; ++i) { + ir_node *param = get_Call_param(call, i); + ir_op *op = get_irn_op(param); - /* count block edges */ - count_block_info(node, graph); + if (op == op_Const || op == op_SymConst) + ++res; + } + return res; } /** - * count all alive nodes and edges in a graph + * update info on calls + * + * @param call The call + * @param graph The graph entry containing the call */ -static void count_nodes_in_graph(graph_entry_t *global, graph_entry_t *graph) +static void update_call_stat(ir_node *call, graph_entry_t *graph) { - node_entry_t *entry; + ir_node *block = get_nodes_block(call); + ir_node *ptr = get_Call_ptr(call); + entity *ent = NULL; + ir_graph *callee = NULL; + int num_const_args; + + /* + * If the block is bad, the whole subgraph will collapse later + * so do not 
count this call. + * This happens in dead code. + */ + if (is_Bad(block)) + return; - irg_walk_graph(graph->irg, count_nodes, NULL, graph); + cnt_inc(&graph->cnt_all_calls); - /* assume we walk every graph only ONCE, we could sum here the global count */ - for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) { - node_entry_t *g_entry = opcode_get_entry(entry->op, global->opcode_hash); + /* found a call, this function is not a leaf */ + graph->is_leaf = 0; - /* update the node counter */ - cnt_add(&g_entry->cnt_alive, &entry->cnt_alive); + if (get_irn_op(ptr) == op_SymConst) { + if (get_SymConst_kind(ptr) == symconst_addr_ent) { + /* ok, we seems to know the entity */ + ent = get_SymConst_entity(ptr); + callee = get_entity_irg(ent); + + /* it is recursive, if it calls at least once */ + if (callee == graph->irg) + graph->is_recursive = 1; + } } + else { + /* indirect call, be could not predict */ + cnt_inc(&graph->cnt_indirect_calls); - /* update the edge counter */ - cnt_add(&global->cnt_edges, &graph->cnt_edges); + /* NOT a leaf call */ + graph->is_leaf_call = LCS_NON_LEAF_CALL; + } + + /* check, if it's a chain-call: Then, the call-block + * must dominate the end block. */ + { + ir_node *curr = get_irg_end_block(graph->irg); + int depth = get_Block_dom_depth(block); + + for (; curr != block && get_Block_dom_depth(curr) > depth;) { + curr = get_Block_idom(curr); + + if (! curr || is_no_Block(curr)) + break; + } + + if (curr != block) + graph->is_chain_call = 0; + } + + /* check, if the callee is a leaf */ + if (callee) { + graph_entry_t *called = graph_get_entry(callee, status->irg_hash); + + if (called->is_analyzed) { + if (! called->is_leaf) + graph->is_leaf_call = LCS_NON_LEAF_CALL; + } + } + + /* check, if arguments of the call are const */ + num_const_args = cnt_const_args(call); + + if (num_const_args > 0) + cnt_inc(&graph->cnt_call_with_cnst_arg); } /** - * register a dumper + * update info on calls for graphs on the wait queue */ -static void stat_register_dumper(dumper_t *dumper, const char *name) +static void update_call_stat_2(ir_node *call, graph_entry_t *graph) { - dumper->next = status->dumper; - status->dumper = dumper; + ir_node *block = get_nodes_block(call); + ir_node *ptr = get_Call_ptr(call); + entity *ent = NULL; + ir_graph *callee = NULL; + + /* + * If the block is bad, the whole subgraph will collapse later + * so do not count this call. + * This happens in dead code. + */ + if (is_Bad(block)) + return; + + if (get_irn_op(ptr) == op_SymConst) { + if (get_SymConst_kind(ptr) == symconst_addr_ent) { + /* ok, we seems to know the entity */ + ent = get_SymConst_entity(ptr); + callee = get_entity_irg(ent); + } + } + + /* check, if the callee is a leaf */ + if (callee) { + graph_entry_t *called = graph_get_entry(callee, status->irg_hash); - if (dumper->init) - dumper->init(dumper, name); + assert(called->is_analyzed); + + if (! 
called->is_leaf) + graph->is_leaf_call = LCS_NON_LEAF_CALL; + } + else + graph->is_leaf_call = LCS_NON_LEAF_CALL; } /** - * dumps an irg + * walker for reachable nodes count */ -static void dump_graph(graph_entry_t *entry) +static void update_node_stat(ir_node *node, void *env) { - dumper_t *dumper; + graph_entry_t *graph = env; + node_entry_t *entry; - for (dumper = status->dumper; dumper; dumper = dumper->next) { - if (dumper->dump_graph) - dumper->dump_graph(dumper, entry); - } + ir_op *op = stat_get_irn_op(node); + int arity = get_irn_arity(node); + + entry = opcode_get_entry(op, graph->opcode_hash); + + cnt_inc(&entry->cnt_alive); + cnt_add_i(&graph->cnt_edges, arity); + + /* count block edges */ + undate_block_info(node, graph); + + /* check for properties that depends on calls like recursion/leaf/indirect call */ + if (op == op_Call) + update_call_stat(node, graph); } /** - * finish the dumper + * walker for reachable nodes count for graphs on the wait_q */ -static void dump_finish(void) +static void update_node_stat_2(ir_node *node, void *env) { - dumper_t *dumper; + graph_entry_t *graph = env; - for (dumper = status->dumper; dumper; dumper = dumper->next) { - if (dumper->finish) - dumper->finish(dumper); - } + /* check for properties that depends on calls like recursion/leaf/indirect call */ + if (get_irn_op(node) == op_Call) + update_call_stat_2(node, graph); } -/* ---------------------------------------------------------------------- */ - /** - * dumps a opcode hash into human readable form + * get the current address mark */ -static void simple_dump_opcode_hash(dumper_t *dmp, pset *set) +static unsigned get_adr_mark(graph_entry_t *graph, ir_node *node) { - node_entry_t *entry; - counter_t f_alive; - counter_t f_new_node; - counter_t f_Id; - - cnt_clr(&f_alive); - cnt_clr(&f_new_node); - cnt_clr(&f_Id); - - fprintf(dmp->f, "%-16s %-8s %-8s %-8s\n", "Opcode", "alive", "created", "->Id"); - for (entry = pset_first(set); entry; entry = pset_next(set)) { - fprintf(dmp->f, "%-16s %8d %8d %8d\n", - get_id_str(entry->op->name), entry->cnt_alive.cnt[0], entry->new_node.cnt[0], entry->into_Id.cnt[0]); - - cnt_add(&f_alive, &entry->cnt_alive); - cnt_add(&f_new_node, &entry->new_node); - cnt_add(&f_Id, &entry->into_Id); - } - fprintf(dmp->f, "-------------------------------------------\n"); - fprintf(dmp->f, "%-16s %8d %8d %8d\n", "Sum", - f_alive.cnt[0], - f_new_node.cnt[0], - f_Id.cnt[0]); + address_mark_entry_t *value = set_find(graph->address_mark, &node, sizeof(*value), HASH_PTR(node)); + + return value ? value->mark : 0; } /** - * dumps a optimization hash into human readable form + * set the current address mark */ -static void simple_dump_opt_hash(dumper_t *dmp, pset *set, int index) +static void set_adr_mark(graph_entry_t *graph, ir_node *node, unsigned val) { - opt_entry_t *entry = pset_first(set); - - if (entry) { - fprintf(dmp->f, "\n%s:\n", opt_names[index]); - fprintf(dmp->f, "%-16s %-8s\n", "Opcode", "deref"); + address_mark_entry_t *value = set_insert(graph->address_mark, &node, sizeof(*value), HASH_PTR(node)); - for (; entry; entry = pset_next(set)) { - fprintf(dmp->f, "%-16s %8d\n", - get_id_str(entry->op->name), entry->count.cnt[0]); - } - } + value->mark = val; } /** - * dumps the endges count + * a vcg attribute hook: Color a node with a different color if + * it's identified as a part of an address expression or at least referenced + * by an address expression. 
*/ -static void simple_dump_edges(dumper_t *dmp, counter_t *cnt) +static int stat_adr_mark_hook(FILE *F, ir_node *node, ir_node *local) { - fprintf(dmp->f, "%-16s %8d\n", "Edges", cnt->cnt[0]); + ir_node *n = local ? local : node; + ir_graph *irg = get_irn_irg(n); + graph_entry_t *graph = graph_get_entry(irg, status->irg_hash); + unsigned mark = get_adr_mark(graph, n); + + if (mark & MARK_ADDRESS_CALC) + fprintf(F, "color: purple"); + else if ((mark & (MARK_REF_ADR | MARK_REF_NON_ADR)) == MARK_REF_ADR) + fprintf(F, "color: pink"); + else if ((mark & (MARK_REF_ADR | MARK_REF_NON_ADR)) == (MARK_REF_ADR|MARK_REF_NON_ADR)) + fprintf(F, "color: lightblue"); + else + return 0; + + /* I know the color! */ + return 1; } /** - * dumps the IRG + * walker that marks every node that is an address calculation + * + * predecessor nodes must be visited first. We ensure this by + * calling in in the post of an outs walk. This should work even in cycles, + * while the pre in a normal walk will not. */ -static void simple_dump_graph(dumper_t *dmp, graph_entry_t *entry) +static void mark_address_calc(ir_node *node, void *env) { - int dump_opts = 1; - block_entry_t *b_entry; + graph_entry_t *graph = env; + ir_mode *mode = get_irn_mode(node); + int i, n; + unsigned mark_preds = MARK_REF_NON_ADR; - if (entry->irg) { - ir_graph *const_irg = get_const_code_irg(); + if (! mode_is_numP(mode)) + return; - if (entry->irg == const_irg) { - fprintf(dmp->f, "\nConst code Irg %p", (void *)entry->irg); - } - else { - if (entry->ent) - fprintf(dmp->f, "\nEntity %s, Irg %p", get_entity_name(entry->ent), (void *)entry->irg); - else - fprintf(dmp->f, "\nIrg %p", (void *)entry->irg); - } + if (mode_is_reference(mode)) { + /* a reference is calculated here, we are sure */ + set_adr_mark(graph, node, MARK_ADDRESS_CALC); - fprintf(dmp->f, " %swalked %d over blocks %d was inlined %d got inlined %d:\n", - entry->deleted ? 
"DELETED " : "", - entry->cnt_walked.cnt[0], entry->cnt_walked_blocks.cnt[0], - entry->cnt_was_inlined.cnt[0], - entry->cnt_got_inlined.cnt[0] - ); + mark_preds = MARK_REF_ADR; } else { - fprintf(dmp->f, "\nGlobals counts:\n"); - dump_opts = 0; - } - - simple_dump_opcode_hash(dmp, entry->opcode_hash); - simple_dump_edges(dmp, &entry->cnt_edges); - - /* effects of optimizations */ - if (dump_opts) { - int i; - - for (i = 0; i < sizeof(entry->opt_hash)/sizeof(entry->opt_hash[0]); ++i) { - simple_dump_opt_hash(dmp, entry->opt_hash[i], i); + unsigned mark = get_adr_mark(graph, node); + + if ((mark & (MARK_REF_ADR | MARK_REF_NON_ADR)) == MARK_REF_ADR) { + /* + * this node has not an reference mode, but is only + * referenced by address calculations + */ + mark_preds = MARK_REF_ADR; } } - /* dump block info */ - fprintf(dmp->f, "\n%12s %12s %12s %12s %12s %12s\n", "Block Nr", "Nodes", "intern", "incoming", "outgoing", "quot"); - for (b_entry = pset_first(entry->block_hash); - b_entry; - b_entry = pset_next(entry->block_hash)) { - fprintf(dmp->f, "%12ld %12u %12u %12u %12u %4.8f\n", - b_entry->block_nr, - b_entry->cnt_nodes.cnt[0], - b_entry->cnt_edges.cnt[0], - b_entry->cnt_in_edges.cnt[0], - b_entry->cnt_out_edges.cnt[0], - (double)b_entry->cnt_edges.cnt[0] / (double)b_entry->cnt_nodes.cnt[0] - ); + /* mark all predecessors */ + for (i = 0, n = get_irn_arity(node); i < n; ++i) { + ir_node *pred = get_irn_n(node, i); + + set_adr_mark(graph, pred, get_adr_mark(graph, pred) | mark_preds); } } /** - * initialise the simple dumper + * Called for every graph when the graph is either deleted or stat_finish() + * is called, must recalculate all statistic info. + * + * @param global The global entry + * @param graph The current entry */ -static void simple_init(dumper_t *dmp, const char *name) +static void update_graph_stat(graph_entry_t *global, graph_entry_t *graph) { - dmp->f = fopen(name, "w"); -} + node_entry_t *entry; -/** - * finishes the simple dumper - */ -static void simple_finish(dumper_t *dmp) -{ - fclose(dmp->f); - dmp->f = NULL; -} + /* clear first the alive counter in the graph */ + for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) { + cnt_clr(&entry->cnt_alive); + } -/** - * the simple human readable dumper - */ -static dumper_t simple_dumper = { - simple_dump_graph, - simple_init, - simple_finish, - NULL, - NULL, -}; + /* set pessimistic values */ + graph->is_leaf = 1; + graph->is_leaf_call = LCS_UNKNOWN; + graph->is_recursive = 0; + graph->is_chain_call = 1; -/* ---------------------------------------------------------------------- */ + /* we need dominator info */ + if (graph->irg != get_const_code_irg()) + if (get_irg_dom_state(graph->irg) != dom_consistent) + compute_doms(graph->irg); -/** - * count the nodes as needed: - * - * 1 normal (data) Phi's - * 2 memory Phi's - * 3 Proj - * 0 all other nodes - */ -static void csv_count_nodes(graph_entry_t *graph, counter_t cnt[]) -{ - node_entry_t *entry; - int i; + /* count the nodes in the graph */ + irg_walk_graph(graph->irg, update_node_stat, NULL, graph); + +#if 0 + entry = opcode_get_entry(op_Call, graph->opcode_hash); - for (i = 0; i < 4; ++i) - cnt_clr(&cnt[i]); + /* check if we have more than 1 call */ + if (cnt_gt(entry->cnt_alive, 1)) + graph->is_chain_call = 0; +#endif + /* recursive functions are never chain calls, leafs don't have calls */ + if (graph->is_recursive || graph->is_leaf) + graph->is_chain_call = 0; + + /* assume we walk every graph only ONCE, we could sum here the global 
count */ for (entry = pset_first(graph->opcode_hash); entry; entry = pset_next(graph->opcode_hash)) { - if (entry->op == op_Phi) { - /* normal Phi */ - cnt_add(&cnt[1], &entry->cnt_alive); - } - else if (entry->op == status->op_PhiM) { - /* memory Phi */ - cnt_add(&cnt[2], &entry->cnt_alive); - } - else if (entry->op == op_Proj) { - /* Proj */ - cnt_add(&cnt[3], &entry->cnt_alive); - } - else { - /* all other nodes */ - cnt_add(&cnt[0], &entry->cnt_alive); - } + node_entry_t *g_entry = opcode_get_entry(entry->op, global->opcode_hash); + + /* update the node counter */ + cnt_add(&g_entry->cnt_alive, &entry->cnt_alive); } + + /* update the edge counter */ + cnt_add(&global->cnt_edges, &graph->cnt_edges); + + /* count the number of address calculation */ + if (graph->irg != get_const_code_irg()) { + ir_graph *rem = current_ir_graph; + + if (get_irg_outs_state(graph->irg) != outs_consistent) + compute_outs(graph->irg); + + /* Must be done an the outs graph */ + current_ir_graph = graph->irg; + irg_out_walk(get_irg_start(graph->irg), NULL, mark_address_calc, graph); + current_ir_graph = rem; + +#if 0 + set_dump_node_vcgattr_hook(stat_adr_mark_hook); + dump_ir_block_graph(graph->irg, "-adr"); + set_dump_node_vcgattr_hook(NULL); +#endif + } + + /* count the DAG's */ + if (status->stat_options & FIRMSTAT_COUNT_DAG) + count_dags_in_graph(global, graph); + + /* calculate the patterns of this graph */ + stat_calc_pattern_history(graph->irg); + + /* leaf function did not call others */ + if (graph->is_leaf) + graph->is_leaf_call = LCS_NON_LEAF_CALL; + else if (graph->is_leaf_call == LCS_UNKNOWN) { + /* we still don't know if this graph calls leaf-functions, so enqueue */ + pdeq_putl(status->wait_q, graph); + } + + /* we have analyzed this graph */ + graph->is_analyzed = 1; } /** - * dumps the IRG + * Called for every graph that was on the wait_q in stat_finish() + * must finish all statistic info calculations. + * + * @param global The global entry + * @param graph The current entry */ -static void csv_dump_graph(dumper_t *dmp, graph_entry_t *entry) +static void update_graph_stat_2(graph_entry_t *global, graph_entry_t *graph) { - const char *name; + if (graph->is_deleted) { + /* deleted, ignore */ + return; + } - counter_t cnt[4]; + if (graph->irg) { + /* count the nodes in the graph */ + irg_walk_graph(graph->irg, update_node_stat_2, NULL, graph); - if (entry->irg) { - ir_graph *const_irg = get_const_code_irg(); + if (graph->is_leaf_call == LCS_UNKNOWN) + graph->is_leaf_call = LCS_LEAF_CALL; + } +} - if (entry->irg == const_irg) { - name = ""; - return; - } - else { - if (entry->ent) - name = get_entity_name(entry->ent); - else - name = ""; - } +/** + * register a dumper + */ +static void stat_register_dumper(const dumper_t *dumper) +{ + dumper_t *p = xmalloc(sizeof(*p)); - csv_count_nodes(entry, cnt); + if (p) { + *p = *dumper; - fprintf(dmp->f, "%-40s, %p, %d, %d, %d, %d\n", - name, - (void *)entry->irg, - cnt[0].cnt[0], - cnt[1].cnt[0], - cnt[2].cnt[0], - cnt[3].cnt[0] - ); + p->next = status->dumper; + p->status = status; + status->dumper = p; } + + /* FIXME: memory leak */ } /** - * initialise the simple dumper + * dumps an IR graph. 
*/ -static void csv_init(dumper_t *dmp, const char *name) +static void stat_dump_graph(graph_entry_t *entry) { - dmp->f = fopen(name, "a"); + dumper_t *dumper; + + for (dumper = status->dumper; dumper; dumper = dumper->next) { + if (dumper->dump_graph) + dumper->dump_graph(dumper, entry); + } } /** - * finishes the simple dumper + * initialize the dumper */ -static void csv_finish(dumper_t *dmp) +static void stat_dump_init(const char *name) { - fclose(dmp->f); - dmp->f = NULL; + dumper_t *dumper; + + for (dumper = status->dumper; dumper; dumper = dumper->next) { + if (dumper->init) + dumper->init(dumper, name); + } } /** - * the simple human readable dumper + * finish the dumper */ -static dumper_t csv_dumper = { - csv_dump_graph, - csv_init, - csv_finish, - NULL, - NULL, -}; +static void stat_dump_finish(void) +{ + dumper_t *dumper; + for (dumper = status->dumper; dumper; dumper = dumper->next) { + if (dumper->finish) + dumper->finish(dumper); + } +} /* ---------------------------------------------------------------------- */ @@ -777,47 +862,15 @@ ir_op *stat_get_op_from_opcode(opcode code) return opcode_find_entry(code, status->ir_op_hash); } -/* initialize the statistics module. */ -void init_stat(unsigned enable_options) -{ -#define X(a) a, sizeof(a)-1 - - int pseudo_id = 0; - - /* enable statistics */ - status->enable = enable_options & FIRMSTAT_ENABLED; - - if (! status->enable) - return; - - obstack_init(&status->cnts); - - /* build the pseudo-ops */ - _op_Phi0.code = --pseudo_id; - _op_Phi0.name = new_id_from_chars(X("Phi0")); - - _op_PhiM.code = --pseudo_id; - _op_PhiM.name = new_id_from_chars(X("PhiM")); - - /* create the hash-tables */ - status->irg_hash = new_pset(graph_cmp, 8); - status->ir_op_hash = new_pset(opcode_cmp_2, 1); - - status->op_Phi0 = &_op_Phi0; - status->op_PhiM = &_op_PhiM; - - stat_register_dumper(&simple_dumper, "firmstat.txt"); - stat_register_dumper(&csv_dumper, "firmstat.csv"); - - /* initialize the pattern hash */ - stat_init_pattern_history(enable_options & FIRMSTAT_PATTERN_ENABLED); -#undef X -} - -/* A new IR op is registered. */ -void stat_new_ir_op(const ir_op *op) +/** + * A new IR op is registered. + * + * @param ctx the hook context + * @param op the new IR opcode that was created. + */ +static void stat_new_ir_op(void *ctx, ir_op *op) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -832,10 +885,15 @@ void stat_new_ir_op(const ir_op *op) STAT_LEAVE; } -/* An IR op is freed. */ -void stat_free_ir_op(const ir_op *op) +/** + * An IR op is freed. + * + * @param ctx the hook context + * @param op the IR opcode that is freed + */ +static void stat_free_ir_op(void *ctx, ir_op *op) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -844,10 +902,16 @@ void stat_free_ir_op(const ir_op *op) STAT_LEAVE; } -/* A new node is created. */ -void stat_new_node(const ir_node *node) +/** + * A new node is created. + * + * @param ctx the hook context + * @param irg the IR graph on which the node is created + * @param node the new IR node that was created + */ +static void stat_new_node(void *ctx, ir_graph *irg, ir_node *node) { - if (! status->enable) + if (! 
status->stat_options) return; /* do NOT count during dead node elimination */ @@ -873,10 +937,15 @@ void stat_new_node(const ir_node *node) STAT_LEAVE; } -/* A node is changed into a Id node */ -void stat_turn_into_id(const ir_node *node) +/** + * A node is changed into a Id node + * + * @param ctx the hook context + * @param node the IR node that will be turned into an ID + */ +static void stat_turn_into_id(void *ctx, ir_node *node) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -898,10 +967,16 @@ void stat_turn_into_id(const ir_node *node) STAT_LEAVE; } -/* A new graph was created */ -void stat_new_graph(ir_graph *irg, entity *ent) +/** + * A new graph was created + * + * @param ctx the hook context + * @param irg the new IR graph that was created + * @param ent the entity of this graph + */ +static void stat_new_graph(void *ctx, ir_graph *irg, entity *ent) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -909,18 +984,30 @@ void stat_new_graph(ir_graph *irg, entity *ent) /* execute for side effect :-) */ graph_entry_t * graph = graph_get_entry(irg, status->irg_hash); - graph->ent = ent; - graph->deleted = 0; + graph->ent = ent; + graph->is_deleted = 0; + graph->is_leaf = 0; + graph->is_leaf_call = 0; + graph->is_recursive = 0; + graph->is_chain_call = 0; + graph->is_analyzed = 0; } STAT_LEAVE; } -/* - * A graph was deleted +/** + * A graph will be deleted + * + * @param ctx the hook context + * @param irg the IR graph that will be deleted + * + * Note that we still hold the information for this graph + * in our hash maps, only a flag is set which prevents this + * information from being changed, it's "frozen" from now. */ -void stat_free_graph(ir_graph *irg) +static void stat_free_graph(void *ctx, ir_graph *irg) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -928,23 +1015,27 @@ void stat_free_graph(ir_graph *irg) graph_entry_t *graph = graph_get_entry(irg, status->irg_hash); graph_entry_t *global = graph_get_entry(NULL, status->irg_hash); - graph->deleted = 1; - - /* count the nodes of the graph yet, it will be destroyed later */ - count_nodes_in_graph(global, graph); + graph->is_deleted = 1; - /* calculate the pattern */ - stat_calc_pattern_history(irg); + if (status->stat_options & FIRMSTAT_COUNT_DELETED) { + /* count the nodes of the graph yet, it will be destroyed later */ + update_graph_stat(global, graph); + } } STAT_LEAVE; } -/* +/** * A walk over a graph is initiated. Do not count walks from statistic code. + * + * @param ctx the hook context + * @param irg the IR graph that will be walked + * @param pre the pre walker + * @param post the post walker */ -void stat_irg_walk(ir_graph *irg, void *pre, void *post) +static void stat_irg_walk(void *ctx, ir_graph *irg, void *pre, void *post) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER_SINGLE; @@ -956,21 +1047,32 @@ void stat_irg_walk(ir_graph *irg, void *pre, void *post) STAT_LEAVE; } -/* +/** * A walk over a graph in block-wise order is initiated. Do not count walks from statistic code. 
+ * + * @param ctx the hook context + * @param irg the IR graph that will be walked + * @param pre the pre walker + * @param post the post walker */ -void stat_irg_walk_blkwise(ir_graph *irg, void *pre, void *post) +static void stat_irg_walk_blkwise(void *ctx, ir_graph *irg, void *pre, void *post) { /* for now, do NOT differentiate between blockwise and normal */ - stat_irg_walk(irg, pre, post); + stat_irg_walk(ctx, irg, pre, post); } -/* +/** * A walk over the graph's blocks is initiated. Do not count walks from statistic code. + * + * @param ctx the hook context + * @param irg the IR graph that will be walked + * @param node the IR node + * @param pre the pre walker + * @param post the post walker */ -void stat_irg_block_walk(ir_graph *irg, const ir_node *node, void *pre, void *post) +static void stat_irg_block_walk(void *ctx, ir_graph *irg, ir_node *node, void *pre, void *post) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER_SINGLE; @@ -983,26 +1085,32 @@ void stat_irg_block_walk(ir_graph *irg, const ir_node *node, void *pre, void *po } /** - * called for every node that is removed due to an optimization + * called for every node that is removed due to an optimization. + * + * @param n the IR node that will be removed + * @param hmap the hash map containing ir_op* -> opt_entry_t* */ -static void removed_due_opt(ir_node *n, pset *set) +static void removed_due_opt(ir_node *n, hmap_opt_entry_t *hmap) { - ir_op *op = get_irn_op(n); - opt_entry_t *entry = opt_get_entry(op, set); + ir_op *op = stat_get_irn_op(n); + opt_entry_t *entry = opt_get_entry(op, hmap); /* increase global value */ cnt_inc(&entry->count); } -/* - * Some nodes were optimized into some others due to an optimization +/** + * Some nodes were optimized into some others due to an optimization. + * + * @param ctx the hook context */ -void stat_merge_nodes( +static void stat_merge_nodes( + void *ctx, ir_node **new_node_array, int new_num_entries, ir_node **old_node_array, int old_num_entries, - stat_opt_kind opt) + hook_opt_kind opt) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -1010,6 +1118,9 @@ void stat_merge_nodes( int i, j; graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash); + if (status->reassoc_run) + opt = HOOK_OPT_REASSOC; + for (i = 0; i < old_num_entries; ++i) { for (j = 0; j < new_num_entries; ++j) if (old_node_array[i] == new_node_array[j]) @@ -1024,29 +1135,55 @@ void stat_merge_nodes( STAT_LEAVE; } -/* +/** + * Reassociation is started/stopped. + * + * @param ctx the hook context + * @param flag if non-zero, reassociation is started else stopped + */ +static void stat_reassociate(void *ctx, int flag) +{ + if (! status->stat_options) + return; + + STAT_ENTER; + { + status->reassoc_run = flag; + } + STAT_LEAVE; +} + +/** * A node was lowered into other nodes + * + * @param ctx the hook context + * @param node the IR node that will be lowered */ -void stat_lower(ir_node *node) +static void stat_lower(void *ctx, ir_node *node) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; { graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash); - removed_due_opt(node, graph->opt_hash[STAT_LOWERED]); + removed_due_opt(node, graph->opt_hash[HOOK_LOWERED]); } STAT_LEAVE; } -/* - * A graph was inlined +/** + * A graph was inlined. 
+ * + * @param ctx the hook context + * @param call the IR call that will re changed into the body of + * the called IR graph + * @param called_irg the IR graph representing the called routine */ -void stat_inline(ir_node *call, ir_graph *called_irg) +static void stat_inline(void *ctx, ir_node *call, ir_graph *called_irg) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -1061,32 +1198,147 @@ void stat_inline(ir_node *call, ir_graph *called_irg) STAT_LEAVE; } -/* +/** + * A graph with tail-recursions was optimized. + * + * @param ctx the hook context + */ +static void stat_tail_rec(void *ctx, ir_graph *irg) +{ + if (! status->stat_options) + return; + + STAT_ENTER; + { + } + STAT_LEAVE; +} + +/** + * Strength reduction was performed on an iteration variable. + * + * @param ctx the hook context + */ +static void stat_strength_red(void *ctx, ir_graph *irg, ir_node *strong, ir_node *cmp) +{ + if (! status->stat_options) + return; + + STAT_ENTER; + { + graph_entry_t *graph = graph_get_entry(irg, status->irg_hash); + cnt_inc(&graph->cnt_strength_red); + + removed_due_opt(strong, graph->opt_hash[HOOK_OPT_STRENGTH_RED]); + } + STAT_LEAVE; +} + +/** * Start the dead node elimination. + * + * @param ctx the hook context */ -void stat_dead_node_elim_start(ir_graph *irg) +static void stat_dead_node_elim_start(void *ctx, ir_graph *irg) { - if (! status->enable) + if (! status->stat_options) return; ++status->in_dead_node_elim; } -/* +/** * Stops the dead node elimination. + * + * @param ctx the hook context */ -void stat_dead_node_elim_stop(ir_graph *irg) +static void stat_dead_node_elim_stop(void *ctx, ir_graph *irg) { - if (! status->enable) + if (! status->stat_options) return; --status->in_dead_node_elim; } +/** + * A multiply was replaced by a series of Shifts/Adds/Subs + * + * @param ctx the hook context + */ +static void stat_arch_dep_replace_mul_with_shifts(void *ctx, ir_node *mul) +{ + if (! status->stat_options) + return; + + STAT_ENTER; + { + graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash); + removed_due_opt(mul, graph->opt_hash[HOOK_OPT_ARCH_DEP]); + } + STAT_LEAVE; +} + +/** + * A division was replaced by a series of Shifts/Muls + * + * @param ctx the hook context + * @param div the div node that will be optimized + */ +static void stat_arch_dep_replace_div_by_const(void *ctx, ir_node *div) +{ + if (! status->stat_options) + return; + + STAT_ENTER; + { + graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash); + removed_due_opt(div, graph->opt_hash[HOOK_OPT_ARCH_DEP]); + } + STAT_LEAVE; +} + +/** + * A modulo was replaced by a series of Shifts/Muls + * + * @param ctx the hook context + * @param mod the mod node that will be optimized + */ +static void stat_arch_dep_replace_mod_by_const(void *ctx, ir_node *mod) +{ + if (! status->stat_options) + return; + + STAT_ENTER; + { + graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash); + removed_due_opt(mod, graph->opt_hash[HOOK_OPT_ARCH_DEP]); + } + STAT_LEAVE; +} + +/** + * A DivMod was replaced by a series of Shifts/Muls + * + * @param ctx the hook context + * @param divmod the divmod node that will be optimized + */ +static void stat_arch_dep_replace_DivMod_by_const(void *ctx, ir_node *divmod) +{ + if (! 
status->stat_options) + return; + + STAT_ENTER; + { + graph_entry_t *graph = graph_get_entry(current_ir_graph, status->irg_hash); + removed_due_opt(divmod, graph->opt_hash[HOOK_OPT_ARCH_DEP]); + } + STAT_LEAVE; +} + /* Finish the statistics */ -void stat_finish(void) +void stat_finish(const char *name) { - if (! status->enable) + if (! status->stat_options) return; STAT_ENTER; @@ -1094,7 +1346,9 @@ void stat_finish(void) graph_entry_t *entry; graph_entry_t *global = graph_get_entry(NULL, status->irg_hash); - /* dump per graph */ + stat_dump_init(name); + + /* calculate the graph statistics */ for (entry = pset_first(status->irg_hash); entry; entry = pset_next(status->irg_hash)) { if (entry->irg == NULL) { @@ -1102,66 +1356,164 @@ void stat_finish(void) continue; } - if (! entry->deleted) { + if (! entry->is_deleted) { /* the graph is still alive, count the nodes on it */ - count_nodes_in_graph(global, entry); + update_graph_stat(global, entry); + } + } - /* calculate the pattern */ - stat_calc_pattern_history(entry->irg); + /* some calculations are dependent, we pushed them on the wait_q */ + while (! pdeq_empty(status->wait_q)) { + entry = pdeq_getr(status->wait_q); + + update_graph_stat_2(global, entry); + } + + + /* dump per graph */ + for (entry = pset_first(status->irg_hash); entry; entry = pset_next(status->irg_hash)) { + + if (entry->irg == NULL) { + /* special entry for the global count */ + continue; } - dump_graph(entry); + if (! entry->is_deleted || status->stat_options & FIRMSTAT_COUNT_DELETED) + stat_dump_graph(entry); + + if (! entry->is_deleted) { + /* clear the counter that are not accumulated */ + graph_clear_entry(entry, 0); + } } /* dump global */ - dump_graph(global); - dump_finish(); + stat_dump_graph(global); + stat_dump_finish(); stat_finish_pattern_history(); + /* clear the global counter here */ + { + node_entry_t *entry; + + for (entry = pset_first(global->opcode_hash); entry; entry = pset_next(global->opcode_hash)) { + opcode_clear_entry(entry); + } + /* clear all global counter */ + graph_clear_entry(global, 1); + } + /* finished */ - status->enable = 0; +// status->stat_options = 0; } STAT_LEAVE; } -#else +/** the hook entries for the Firm statistics module */ +static hook_entry_t stat_hooks[hook_last]; -/* need this for prototypes */ -#define FIRM_STATISTICS -#include "firmstat.h" +/* initialize the statistics module. */ +void init_stat(unsigned enable_options) +{ +#define X(a) a, sizeof(a)-1 +#define HOOK(h, fkt) \ + stat_hooks[h].hook._##h = fkt; register_hook(h, &stat_hooks[h]) -void init_stat(void) {} + /* enable statistics */ + status->stat_options = enable_options & FIRMSTAT_ENABLED ? enable_options : 0; -void stat_finish(void) {} + if (! 
status->stat_options) + return; -void stat_new_ir_op(const ir_op *op) {} + /* register all hooks */ + HOOK(hook_new_ir_op, stat_new_ir_op); + HOOK(hook_free_ir_op, stat_free_ir_op); + HOOK(hook_new_node, stat_new_node); + HOOK(hook_turn_into_id, stat_turn_into_id); + HOOK(hook_new_graph, stat_new_graph); + HOOK(hook_free_graph, stat_free_graph); + HOOK(hook_irg_walk, stat_irg_walk); + HOOK(hook_irg_walk_blkwise, stat_irg_walk_blkwise); + HOOK(hook_irg_block_walk, stat_irg_block_walk); + HOOK(hook_merge_nodes, stat_merge_nodes); + HOOK(hook_reassociate, stat_reassociate); + HOOK(hook_lower, stat_lower); + HOOK(hook_inline, stat_inline); + HOOK(hook_tail_rec, stat_tail_rec); + HOOK(hook_strength_red, stat_strength_red); + HOOK(hook_dead_node_elim_start, stat_dead_node_elim_start); + HOOK(hook_dead_node_elim_stop, stat_dead_node_elim_stop); + HOOK(hook_arch_dep_replace_mul_with_shifts, stat_arch_dep_replace_mul_with_shifts); + HOOK(hook_arch_dep_replace_div_by_const, stat_arch_dep_replace_div_by_const); + HOOK(hook_arch_dep_replace_mod_by_const, stat_arch_dep_replace_mod_by_const); + HOOK(hook_arch_dep_replace_DivMod_by_const, stat_arch_dep_replace_DivMod_by_const); -void stat_free_ir_op(const ir_op *op) {} + obstack_init(&status->cnts); -void stat_new_node(const ir_node *node) {} + /* create the hash-tables */ + status->irg_hash = new_pset(graph_cmp, 8); + status->ir_op_hash = new_pset(opcode_cmp_2, 1); -void stat_turn_into_id(const ir_node *node) {} + /* create the wait queue */ + status->wait_q = new_pdeq(); -void stat_new_graph(ir_graph *irg, entity *ent) {} + if (enable_options & FIRMSTAT_COUNT_STRONG_OP) { + /* build the pseudo-ops */ + _op_Phi0.code = get_next_ir_opcode(); + _op_Phi0.name = new_id_from_chars(X("Phi0")); -void stat_free_graph(ir_graph *irg) {} + _op_PhiM.code = get_next_ir_opcode(); + _op_PhiM.name = new_id_from_chars(X("PhiM")); -void stat_irg_walk(ir_graph *irg, void *pre, void *post) {} + _op_ProjM.code = get_next_ir_opcode(); + _op_ProjM.name = new_id_from_chars(X("ProjM")); -void stat_irg_block_walk(ir_graph *irg, const ir_node *node, void *pre, void *post) {} + _op_MulC.code = get_next_ir_opcode(); + _op_MulC.name = new_id_from_chars(X("MulC")); -void stat_merge_nodes( - ir_node **new_node_array, int new_num_entries, - ir_node **old_node_array, int old_num_entries, - stat_opt_kind opt) {} + _op_DivC.code = get_next_ir_opcode(); + _op_DivC.name = new_id_from_chars(X("DivC")); -void stat_lower(ir_node *node) {} + _op_ModC.code = get_next_ir_opcode(); + _op_ModC.name = new_id_from_chars(X("ModC")); -void stat_inline(ir_node *call, ir_graph *irg) {} + _op_DivModC.code = get_next_ir_opcode(); + _op_DivModC.name = new_id_from_chars(X("DivModC")); -void stat_dead_node_elim_start(ir_graph *irg) {} + status->op_Phi0 = &_op_Phi0; + status->op_PhiM = &_op_PhiM; + status->op_ProjM = &_op_ProjM; + status->op_MulC = &_op_MulC; + status->op_DivC = &_op_DivC; + status->op_ModC = &_op_ModC; + status->op_DivModC = &_op_DivModC; + } + else { + status->op_Phi0 = NULL; + status->op_PhiM = NULL; + status->op_ProjM = NULL; + status->op_MulC = NULL; + status->op_DivC = NULL; + status->op_ModC = NULL; + status->op_DivModC = NULL; + } -void stat_dead_node_elim_stop(ir_graph *irg) {} + /* register the dumper */ + stat_register_dumper(&simple_dumper); -#endif + if (enable_options & FIRMSTAT_CSV_OUTPUT) + stat_register_dumper(&csv_dumper); + + /* initialize the pattern hash */ + stat_init_pattern_history(enable_options & FIRMSTAT_PATTERN_ENABLED); +#undef HOOK +#undef X +} + +#else + +/* 
Finish the statistics */ +void stat_finish(const char *name) {} + +#endif /* FIRM_STATISTICS */
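
For orientation, a minimal usage sketch of the statistics interface as it looks after this patch. init_stat(), stat_finish() and the FIRMSTAT_* option flags are taken from the code above; the "firmstat.h" include, the wrapper function name and the surrounding libfirm set-up (left as comments) are assumptions and not part of the patch.

#include "firmstat.h"   /* assumed public header declaring init_stat()/stat_finish() */

static void run_with_statistics(void)
{
	/* ... usual libfirm initialisation is assumed to happen before this point ... */

	/* Statistics stay disabled unless FIRMSTAT_ENABLED is part of the mask. */
	init_stat(FIRMSTAT_ENABLED
	        | FIRMSTAT_COUNT_STRONG_OP   /* also count the Phi0/PhiM/MulC/DivC/ModC/DivModC pseudo ops */
	        | FIRMSTAT_CSV_OUTPUT);      /* register the CSV dumper in addition to the text dumper */

	/* ... build and optimise ir_graphs here; the registered hooks
	   (hook_new_node, hook_merge_nodes, hook_inline, ...) update the counters ... */

	/* Recompute the per-graph statistics, drain the wait queue and hand the
	   collected data to the dumpers; the name is passed to each dumper's init(). */
	stat_finish("firmstat");
}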