/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "irnodeset.h"
#include "irpass.h"
#include "tv_t.h"
+#include "irtools.h"
#include "irprintf.h"
#include "irdump.h"
* An opcode map key.
*/
struct opcode_key_t {
- ir_opcode code; /**< The Firm opcode. */
+ unsigned code; /**< The Firm opcode. */
ir_mode *mode; /**< The mode of all nodes in the partition. */
int arity; /**< The arity of this opcode (needed for Phi etc. */
union {
ir_entity *ent; /**< For Sel Nodes, its entity */
int intVal; /**< For Conv/Div Nodes: strict/remainderless */
unsigned uintVal;/**< for Builtin: the kind */
- ir_node *block; /**< for Block: itself */
+		ir_node *irn; /**< for nodes that can never be congruent: the node itself */
void *ptr; /**< generic pointer for hash/cmp */
} u;
};
* have to use this union.
*/
typedef union {
- tarval *tv;
+ ir_tarval *tv;
symconst_symbol sym;
} lattice_elem_t;
DEBUG_ONLY(static unsigned part_nr = 0);
/** The tarval returned by Unknown nodes: set to either tarval_bad OR tarval_top. */
-static tarval *tarval_UNKNOWN;
+static ir_tarval *tarval_UNKNOWN;
/* forward */
static node_t *identity(node_t *node);
}
} /* check_partition */
+/**
+ * Return the result mode of a node (part of combo's opcode).
+ *
+ * For most nodes this is simply the node's own mode; Load, Div and Mod
+ * are special-cased to use their attribute result mode instead
+ * (presumably because the node mode of these is a tuple — the earlier
+ * per-case get_Load_mode() handling this replaces suggests the same).
+ */
+static ir_mode *get_irn_resmode(const ir_node *irn)
+{
+	switch (get_irn_opcode(irn)) {
+	case iro_Load:
+		/* the mode of the loaded value, not the node's mode */
+		return get_Load_mode(irn);
+	case iro_Div:
+		return get_Div_resmode(irn);
+	case iro_Mod:
+		return get_Mod_resmode(irn);
+	default:
+		return get_irn_mode(irn);
+	}
+} /* get_irn_resmode */
+
/**
* check that all leader nodes in the partition have the same opcode.
*/
if (first) {
key.code = get_irn_opcode(irn);
- key.mode = get_irn_mode(irn);
+ key.mode = get_irn_resmode(irn);
key.arity = get_irn_arity(irn);
key.u.proj = 0;
key.u.ent = NULL;
key.u.intVal = get_Div_no_remainder(irn);
break;
case iro_Block:
- key.u.block = irn;
- break;
- case iro_Load:
- key.mode = get_Load_mode(irn);
+ case iro_ASM:
+ key.u.irn = irn;
break;
case iro_Builtin:
key.u.intVal = get_Builtin_kind(irn);
}
first = 0;
} else {
- assert((unsigned)key.code == get_irn_opcode(irn));
- assert(key.mode == get_irn_mode(irn));
+ assert((unsigned)key.code == get_irn_opcode(irn));
+ assert(key.mode == get_irn_resmode(irn));
assert(key.arity == get_irn_arity(irn));
switch (get_irn_opcode(irn)) {
assert(key.u.intVal == get_Div_no_remainder(irn));
break;
case iro_Block:
- assert(key.u.block == irn);
- break;
- case iro_Load:
- assert(key.mode == get_Load_mode(irn));
+ case iro_ASM:
+ assert(key.u.irn == irn);
break;
case iro_Builtin:
- assert(key.u.intVal == (int) get_Builtin_kind(irn));
+ assert(key.u.intVal == (int)get_Builtin_kind(irn));
break;
default:
break;
*/
static int listmap_cmp_ptr(const void *elt, const void *key, size_t size)
{
- const listmap_entry_t *e1 = elt;
- const listmap_entry_t *e2 = key;
+ const listmap_entry_t *e1 = (listmap_entry_t*)elt;
+ const listmap_entry_t *e2 = (listmap_entry_t*)key;
(void) size;
return e1->id != e2->id;
key.id = id;
key.list = NULL;
key.next = NULL;
- entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
+ entry = (listmap_entry_t*)set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
if (entry->list == NULL) {
/* a new entry, put into the list */
*/
static unsigned opcode_hash(const opcode_key_t *entry)
{
- return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ptr) + entry->arity;
+ return (unsigned)(PTR_TO_INT(entry->mode) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ptr) + entry->arity);
} /* opcode_hash */
/**
*/
static int cmp_opcode(const void *elt, const void *key, size_t size)
{
- const opcode_key_t *o1 = elt;
- const opcode_key_t *o2 = key;
+ const opcode_key_t *o1 = (opcode_key_t*)elt;
+ const opcode_key_t *o2 = (opcode_key_t*)key;
(void) size;
return o1->code != o2->code || o1->mode != o2->mode ||
*/
static int cmp_def_use_edge(const void *a, const void *b)
{
- const ir_def_use_edge *ea = a;
- const ir_def_use_edge *eb = b;
+ const ir_def_use_edge *ea = (const ir_def_use_edge*)a;
+ const ir_def_use_edge *eb = (const ir_def_use_edge*)b;
/* no overrun, because range is [-1, MAXINT] */
return ea->pos - eb->pos;
*
* @return the associated type of this node
*/
-static inline tarval *get_node_tarval(const ir_node *irn)
+static inline ir_tarval *get_node_tarval(const ir_node *irn)
{
lattice_elem_t type = get_node_type(irn);
*/
static void create_initial_partitions(ir_node *irn, void *ctx)
{
- environment_t *env = ctx;
+ environment_t *env = (environment_t*)ctx;
partition_t *part = env->initial;
node_t *node;
continue;
if (is_constant_type(y->type)) {
- ir_opcode code = get_irn_opcode(succ);
+ unsigned code = get_irn_opcode(succ);
if (code == iro_Sub || code == iro_Cmp)
add_to_cprop(y, env);
}
y = get_irn_node(succ);
if (is_constant_type(y->type)) {
- ir_opcode code = get_irn_opcode(succ);
+ unsigned code = get_irn_opcode(succ);
if (code == iro_Eor)
add_to_cprop(y, env);
}
ir_node *irn = node->node;
key.code = get_irn_opcode(irn);
- key.mode = get_irn_mode(irn);
+ key.mode = get_irn_resmode(irn);
key.arity = get_irn_arity(irn);
key.u.proj = 0;
key.u.ent = NULL;
* We fix it by never letting blocks be congruent
* which cannot be detected by combo either.
*/
- key.u.block = irn;
+ key.u.irn = irn;
break;
- case iro_Load:
- key.mode = get_Load_mode(irn);
+ case iro_ASM:
+ /*
+		 * It is difficult to detect when two ASM nodes are congruent: even
+ * if the assembler "text" is identical, the instruction might
+ * have a side effect like flag toggle or function call.
+ * So, do not even try it.
+ *
+ */
+ key.u.irn = irn;
break;
case iro_Builtin:
key.u.intVal = get_Builtin_kind(irn);
break;
}
- entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
+ entry = (opcode_key_t*)set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
return entry;
} /* lambda_opcode */
node_t *r = get_irn_node(get_Sub_right(sub));
lattice_elem_t a = l->type;
lattice_elem_t b = r->type;
- tarval *tv;
+ ir_tarval *tv;
if (a.tv == tarval_top || b.tv == tarval_top) {
node->type.tv = tarval_top;
node_t *r = get_irn_node(get_Eor_right(eor));
lattice_elem_t a = l->type;
lattice_elem_t b = r->type;
- tarval *tv;
+ ir_tarval *tv;
if (a.tv == tarval_top || b.tv == tarval_top) {
node->type.tv = tarval_top;
node->type.tv = tarval_b_true;
}
} else if (is_con(a) && is_con(b)) {
- /* both nodes are constants, we can probably do something */
- if (mode_is_float(mode)) {
- /* beware of NaN's */
- node->type.tv = tarval_bottom;
- } else {
- node->type.tv = tarval_b_true;
- }
+ node->type.tv = tarval_b_true;
} else {
node->type.tv = tarval_bottom;
}
node_t *r = get_irn_node(get_Cmp_right(cmp));
lattice_elem_t a = l->type;
lattice_elem_t b = r->type;
- pn_Cmp pnc = get_Proj_proj(proj);
- tarval *tv;
+ pn_Cmp pnc = get_Proj_pn_cmp(proj);
+ ir_tarval *tv;
if (a.tv == tarval_top || b.tv == tarval_top) {
node->type.tv = tarval_undefined;
} else if (is_con(a) && is_con(b)) {
default_compute(node);
- } else if (r->part == l->part &&
- (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
- /*
- * BEWARE: a == a is NOT always True for floating Point values, as
- * NaN != NaN is defined, so we must check this here.
- */
- tv = pnc & pn_Cmp_Eq ? tarval_b_true: tarval_b_false;
+
+ /*
+ * BEWARE: a == a is NOT always True for floating Point values, as
+ * NaN != NaN is defined, so we must check this here.
+ * (while for some pnc we could still optimize we have to stay
+ * consistent with compute_Cmp, so don't do anything for floats)
+ */
+ } else if (r->part == l->part && !mode_is_float(get_irn_mode(l->node))) {
+ tv = pnc & pn_Cmp_Eq ? tarval_b_true : tarval_b_false;
/* if the node was ONCE evaluated by all constants, but now
this breaks AND we get from the argument partitions a different
return;
#endif
- if (is_no_Block(irn)) {
+ if (!is_Block(irn)) {
/* for pinned nodes, check its control input */
if (get_irn_pinned(skip_Proj(irn)) == op_pin_state_pinned) {
node_t *block = get_irn_node(get_nodes_block(irn));
} /* compute */
/*
- * Identity functions: Note that one might thing that identity() is just a
+ * Identity functions: Note that one might think that identity() is just a
* synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
* here, because it expects that the identity node is one of the inputs, which is NOT
* always true for equivalent_node() which can handle (and does sometimes) DAGs.
*/
static node_t *identity_comm_zero_binop(node_t *node)
{
- ir_node *op = node->node;
- node_t *a = get_irn_node(get_binop_left(op));
- node_t *b = get_irn_node(get_binop_right(op));
- ir_mode *mode = get_irn_mode(op);
- tarval *zero;
+ ir_node *op = node->node;
+ node_t *a = get_irn_node(get_binop_left(op));
+ node_t *b = get_irn_node(get_binop_right(op));
+ ir_mode *mode = get_irn_mode(op);
+ ir_tarval *zero;
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
*/
static node_t *identity_shift(node_t *node)
{
- ir_node *op = node->node;
- node_t *b = get_irn_node(get_binop_right(op));
- ir_mode *mode = get_irn_mode(b->node);
- tarval *zero;
+ ir_node *op = node->node;
+ node_t *b = get_irn_node(get_binop_right(op));
+ ir_mode *mode = get_irn_mode(b->node);
+ ir_tarval *zero;
/* node: no input should be tarval_top, else the binop would be also
* Top and not being split. */
*/
static node_t *identity_Mul(node_t *node)
{
- ir_node *op = node->node;
- node_t *a = get_irn_node(get_Mul_left(op));
- node_t *b = get_irn_node(get_Mul_right(op));
- ir_mode *mode = get_irn_mode(op);
- tarval *one;
+ ir_node *op = node->node;
+ node_t *a = get_irn_node(get_Mul_left(op));
+ node_t *b = get_irn_node(get_Mul_right(op));
+ ir_mode *mode = get_irn_mode(op);
+ ir_tarval *one;
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
*/
static node_t *identity_And(node_t *node)
{
- ir_node *and = node->node;
- node_t *a = get_irn_node(get_And_left(and));
- node_t *b = get_irn_node(get_And_right(and));
- tarval *neutral = get_mode_all_one(get_irn_mode(and));
+ ir_node *andnode = node->node;
+ node_t *a = get_irn_node(get_And_left(andnode));
+ node_t *b = get_irn_node(get_And_right(andnode));
+ ir_tarval *neutral = get_mode_all_one(get_irn_mode(andnode));
/* node: no input should be tarval_top, else the And would be also
* Top and not being split. */
*/
static void apply_cf(ir_node *block, void *ctx)
{
- environment_t *env = ctx;
+ environment_t *env = (environment_t*)ctx;
node_t *node = get_irn_node(block);
int i, j, k, n;
ir_node **ins, **in_X;
next = get_Phi_next(phi);
if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
/* this Phi is replaced by a constant */
- tarval *tv = node->type.tv;
- ir_node *c = new_Const(tv);
+ ir_tarval *tv = node->type.tv;
+ ir_node *c = new_r_Const(current_ir_graph, tv);
set_irn_node(c, node);
node->node = c;
* the number of Conv due to CSE. */
ir_node *block = get_nodes_block(leader);
dbg_info *dbg = get_irn_dbg_info(irn);
-
- leader = new_rd_Conv(dbg, block, leader, mode);
+ ir_node *nlead = new_rd_Conv(dbg, block, leader, mode);
+
+ if (nlead != leader) {
+			/* Note: this newly created irn has no node info because
+ * it is created after the analysis. However, this node
+ * replaces the node irn and should not be visited again,
+ * so set its visited count to the count of irn.
+			 * Otherwise we might visit this node more than once if
+ * irn had more than one user.
+ */
+ set_irn_node(nlead, NULL);
+ set_irn_visited(nlead, get_irn_visited(irn));
+ leader = nlead;
+ }
}
exchange(irn, leader);
} /* exchange_leader */
*/
static void find_kept_memory(ir_node *irn, void *ctx)
{
- environment_t *env = ctx;
+ environment_t *env = (environment_t*)ctx;
node_t *node, *block;
if (get_irn_mode(irn) != mode_M)
*/
static void apply_result(ir_node *irn, void *ctx)
{
- environment_t *env = ctx;
+ environment_t *env = (environment_t*)ctx;
node_t *node = get_irn_node(irn);
if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
exchange(irn, jmp);
env->modified = 1;
} else {
- node_t *sel = get_irn_node(get_Cond_selector(cond));
- tarval *tv = sel->type.tv;
+ node_t *sel = get_irn_node(get_Cond_selector(cond));
+ ir_tarval *tv = sel->type.tv;
if (is_tarval(tv) && tarval_is_constant(tv)) {
/* The selector is a constant, but more
} else {
/* normal data node */
if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
- tarval *tv = node->type.tv;
+ ir_tarval *tv = node->type.tv;
/*
* Beware: never replace mode_T nodes by constants. Currently we must mark
*/
if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
/* can be replaced by a constant */
- ir_node *c = new_Const(tv);
+ ir_node *c = new_r_Const(current_ir_graph, tv);
set_irn_node(c, node);
node->node = c;
DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
static void apply_end(ir_node *end, environment_t *env)
{
int i, j, n = get_End_n_keepalives(end);
- ir_node **in;
+ ir_node **in = NULL;
if (n > 0)
NEW_ARR_A(ir_node *, in, n);
*/
static void set_compute_functions(void)
{
- int i;
+ size_t i, n;
/* set the default compute function */
- for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_opcodes(); i < n; ++i) {
ir_op *op = get_irp_opcode(i);
op->ops.generic = (op_func)default_compute;
}
/**
* Add memory keeps.
*/
-static void add_memory_keeps(ir_node **kept_memory, int len)
+static void add_memory_keeps(ir_node **kept_memory, size_t len)
{
ir_node *end = get_irg_end(current_ir_graph);
int i;
+ size_t idx;
ir_nodeset_t set;
ir_nodeset_init(&set);
for (i = get_End_n_keepalives(end) - 1; i >= 0; --i)
ir_nodeset_insert(&set, get_End_keepalive(end, i));
- for (i = len - 1; i >= 0; --i) {
- ir_node *ka = kept_memory[i];
+ for (idx = 0; idx < len; ++idx) {
+ ir_node *ka = kept_memory[idx];
if (! ir_nodeset_contains(&set, ka)) {
add_End_keepalive(end, ka);
ir_node *initial_bl;
node_t *start;
ir_graph *rem = current_ir_graph;
- int len;
+ size_t len;
current_ir_graph = irg;