/*
- * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
*/
static ir_tarval *computed_value_Proj_Cmp(const ir_node *n)
{
- ir_node *cmp = get_Proj_pred(n);
- ir_node *left = get_Cmp_left(cmp);
- ir_node *right = get_Cmp_right(cmp);
- long pn_cmp = get_Proj_proj(n);
- ir_mode *mode = get_irn_mode(left);
+ ir_node *cmp = get_Proj_pred(n);
+ ir_node *left = get_Cmp_left(cmp);
+ ir_node *right = get_Cmp_right(cmp);
+ pn_Cmp pn_cmp = get_Proj_pn_cmp(n);
+ ir_mode *mode = get_irn_mode(left);
ir_tarval *tv_l, *tv_r;
/*
irg = get_irn_irg(phi);
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
- res[i] = new_r_Const(irg, res[i]);
+ res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
}
return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_binop_on_phi */
irg = get_irn_irg(a);
for (i = 0; i < n; ++i) {
pred = get_irn_n(a, i);
- res[i] = new_r_Const(irg, res[i]);
+ res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
}
return new_r_Phi(get_nodes_block(a), n, (ir_node **)res, mode);
} /* apply_binop_on_2_phis */
irg = get_irn_irg(phi);
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
- res[i] = new_r_Const(irg, res[i]);
+ res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
}
return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_unop_on_phi */
irg = get_irn_irg(phi);
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
- res[i] = new_r_Const(irg, res[i]);
+ res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
}
return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_conv_on_phi */
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg));
+ set_Tuple_pred(n, pn_Div_X_except, get_irg_bad(irg));
set_Tuple_pred(n, pn_Div_res, value);
}
return n;
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg));
+ set_Tuple_pred(n, pn_Mod_X_except, get_irg_bad(irg));
set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_DivMod_X_except, new_r_Bad(irg)); /*no exception*/
+ set_Tuple_pred(n, pn_DivMod_X_except, get_irg_bad(irg)); /*no exception*/
set_Tuple_pred(n, pn_DivMod_res_div, va);
set_Tuple_pred(n, pn_DivMod_res_mod, vb);
}
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Quot_X_except, new_r_Bad(irg));
+ set_Tuple_pred(n, pn_Quot_X_except, get_irg_bad(irg));
set_Tuple_pred(n, pn_Quot_res, m);
DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
}
jmp = new_r_Jmp(blk);
turn_into_tuple(n, pn_Cond_max);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg));
+ set_Tuple_pred(n, pn_Cond_false, get_irg_bad(irg));
set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
set_Tuple_pred(n, pn_Cond_false, jmp);
- set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg));
+ set_Tuple_pred(n, pn_Cond_true, get_irg_bad(irg));
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(irg), blk);
ir_node *pred_b = get_Proj_pred(b);
if (pred_a == pred_b) {
dbg_info *dbgi = get_irn_dbg_info(n);
- pn_Cmp pn_a = get_Proj_proj(a);
- pn_Cmp pn_b = get_Proj_proj(b);
+ pn_Cmp pn_a = get_Proj_pn_cmp(a);
+ pn_Cmp pn_b = get_Proj_pn_cmp(b);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a & pn_b;
ir_node *pred_b = get_Proj_pred(b);
if (pred_a == pred_b) {
dbg_info *dbgi = get_irn_dbg_info(n);
- pn_Cmp pn_a = get_Proj_proj(a);
- pn_Cmp pn_b = get_Proj_proj(b);
+ pn_Cmp pn_a = get_Proj_pn_cmp(a);
+ pn_Cmp pn_b = get_Proj_pn_cmp(b);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a ^ pn_b;
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
}
case pn_Div_M: {
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
}
case pn_Mod_M: {
/* we found an exception handler, remove it */
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
}
case pn_DivMod_M: {
* AND c1 ===> OR if (c1 | c2) == 0x111..11
* OR
*/
-static ir_node *transform_node_Or_bf_store(ir_node *or)
+static ir_node *transform_node_Or_bf_store(ir_node *irn_or)
{
- ir_node *and, *c1;
+ ir_node *irn_and, *c1;
ir_node *or_l, *c2;
ir_node *and_l, *c3;
ir_node *value, *c4;
ir_node *new_and, *new_const, *block;
- ir_mode *mode = get_irn_mode(or);
+ ir_mode *mode = get_irn_mode(irn_or);
ir_tarval *tv1, *tv2, *tv3, *tv4, *tv;
for (;;) {
ir_graph *irg;
- get_comm_Binop_Ops(or, &and, &c1);
- if (!is_Const(c1) || !is_And(and))
- return or;
+ get_comm_Binop_Ops(irn_or, &irn_and, &c1);
+ if (!is_Const(c1) || !is_And(irn_and))
+ return irn_or;
- get_comm_Binop_Ops(and, &or_l, &c2);
+ get_comm_Binop_Ops(irn_and, &or_l, &c2);
if (!is_Const(c2))
- return or;
+ return irn_or;
tv1 = get_Const_tarval(c1);
tv2 = get_Const_tarval(c2);
tv = tarval_or(tv1, tv2);
if (tarval_is_all_one(tv)) {
/* the AND does NOT clear a bit with isn't set by the OR */
- set_Or_left(or, or_l);
- set_Or_right(or, c1);
+ set_Or_left(irn_or, or_l);
+ set_Or_right(irn_or, c1);
/* check for more */
continue;
}
if (!is_Or(or_l))
- return or;
+ return irn_or;
get_comm_Binop_Ops(or_l, &and_l, &c3);
if (!is_Const(c3) || !is_And(and_l))
- return or;
+ return irn_or;
get_comm_Binop_Ops(and_l, &value, &c4);
if (!is_Const(c4))
- return or;
+ return irn_or;
/* ok, found the pattern, check for conditions */
- assert(mode == get_irn_mode(and));
+ assert(mode == get_irn_mode(irn_and));
assert(mode == get_irn_mode(or_l));
assert(mode == get_irn_mode(and_l));
tv = tarval_or(tv4, tv2);
if (!tarval_is_all_one(tv)) {
/* have at least one 0 at the same bit position */
- return or;
+ return irn_or;
}
if (tv3 != tarval_andnot(tv3, tv4)) {
/* bit in the or_mask is outside the and_mask */
- return or;
+ return irn_or;
}
if (tv1 != tarval_andnot(tv1, tv2)) {
/* bit in the or_mask is outside the and_mask */
- return or;
+ return irn_or;
}
/* ok, all conditions met */
- block = get_irn_n(or, -1);
+ block = get_irn_n(irn_or, -1);
irg = get_irn_irg(block);
new_and = new_r_And(block, value, new_r_Const(irg, tarval_and(tv4, tv2)), mode);
new_const = new_r_Const(irg, tarval_or(tv3, tv1));
- set_Or_left(or, new_and);
- set_Or_right(or, new_const);
+ set_Or_left(irn_or, new_and);
+ set_Or_right(irn_or, new_const);
/* check for more */
}
/**
* Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl
*/
-static ir_node *transform_node_Or_Rotl(ir_node *or)
+static ir_node *transform_node_Or_Rotl(ir_node *irn_or)
{
- ir_mode *mode = get_irn_mode(or);
+ ir_mode *mode = get_irn_mode(irn_or);
ir_node *shl, *shr, *block;
ir_node *irn, *x, *c1, *c2, *n;
ir_tarval *tv1, *tv2;
/* some backends can't handle rotl */
if (!be_get_backend_param()->support_rotl)
- return or;
+ return irn_or;
if (! mode_is_int(mode))
- return or;
+ return irn_or;
- shl = get_binop_left(or);
- shr = get_binop_right(or);
+ shl = get_binop_left(irn_or);
+ shr = get_binop_right(irn_or);
if (is_Shr(shl)) {
if (!is_Shl(shr))
- return or;
+ return irn_or;
irn = shl;
shl = shr;
shr = irn;
} else if (!is_Shl(shl)) {
- return or;
+ return irn_or;
} else if (!is_Shr(shr)) {
- return or;
+ return irn_or;
}
x = get_Shl_left(shl);
if (x != get_Shr_left(shr))
- return or;
+ return irn_or;
c1 = get_Shl_right(shl);
c2 = get_Shr_right(shr);
if (is_Const(c1) && is_Const(c2)) {
tv1 = get_Const_tarval(c1);
if (! tarval_is_long(tv1))
- return or;
+ return irn_or;
tv2 = get_Const_tarval(c2);
if (! tarval_is_long(tv2))
- return or;
+ return irn_or;
if (get_tarval_long(tv1) + get_tarval_long(tv2)
!= (int) get_mode_size_bits(mode))
- return or;
+ return irn_or;
/* yet, condition met */
- block = get_nodes_block(or);
+ block = get_nodes_block(irn_or);
n = new_r_Rotl(block, x, c1, mode);
- DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
+ DBG_OPT_ALGSIM1(irn_or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
}
/* Note: the obvious rot formulation (a << x) | (a >> (32-x)) gets
* transformed to (a << x) | (a >> -x) by transform_node_shift_modulo() */
if (!is_negated_value(c1, c2)) {
- return or;
+ return irn_or;
}
/* yet, condition met */
- block = get_nodes_block(or);
+ block = get_nodes_block(irn_or);
n = new_r_Rotl(block, x, c1, mode);
- DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
+ DBG_OPT_ALGSIM0(irn_or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
} /* transform_node_Or_Rotl */
ir_node *pred_b = get_Proj_pred(b);
if (pred_a == pred_b) {
dbg_info *dbgi = get_irn_dbg_info(n);
- pn_Cmp pn_a = get_Proj_proj(a);
- pn_Cmp pn_b = get_Proj_proj(b);
+ pn_Cmp pn_a = get_Proj_pn_cmp(a);
+ pn_Cmp pn_b = get_Proj_pn_cmp(b);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a | pn_b;
if (is_Cmp(cmp) && is_Const(t) &&
(!is_Const(f) || (is_Const_null(t) && !is_Const_null(f)))) {
- pn_Cmp pnc = get_Proj_proj(sel);
+ pn_Cmp pnc = get_Proj_pn_cmp(sel);
ir_node *tmp = t;
t = f;
f = tmp;
/** Compares the attributes of two Proj nodes. */
static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
{
- return a->attr.proj != b->attr.proj;
+ return a->attr.proj.proj != b->attr.proj.proj;
} /* node_cmp_attr_Proj */
/** Compares the attributes of two Alloc nodes. */
ir_normalize_node(n);
/* lookup or insert in hash table with given hash key. */
- nn = pset_insert(value_table, n, ir_node_hash(n));
+ nn = (ir_node*)pset_insert(value_table, n, ir_node_hash(n));
if (nn != n) {
/* n is reachable again */
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- foreach_pset(irg->value_table, node) {
+ foreach_pset(irg->value_table, ir_node*, node) {
visit(node, env);
}
current_ir_graph = rem;
if (is_Block(block)) {
if (is_Block_dead(block)) {
/* control flow from dead block is dead */
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
}
for (i = get_irn_arity(block) - 1; i >= 0; --i) {
* but can be found by irg_walk()!
*/
set_Block_dead(block);
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
}
}
}
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
blocks predecessors is dead. */
- if (op != op_Block && op != op_Phi && op != op_Tuple) {
+ if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor) {
ir_graph *irg = get_irn_irg(node);
irn_arity = get_irn_arity(node);
*/
if (is_irn_pinned_in_irg(node) &&
is_Block_dead(get_nodes_block(skip_Proj(node))))
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
for (i = 0; i < irn_arity; i++) {
ir_node *pred = get_irn_n(node, i);
if (is_Bad(pred))
- return new_r_Bad(irg);
+ return get_irg_bad(irg);
#if 0
/* Propagating Unknowns here seems to be a bad idea, because
sometimes we need a node as a input and did not want that
for (i = 0; i < irn_arity; i++) {
if (!is_Bad(get_irn_n(node, i))) break;
}
- if (i == irn_arity) node = new_r_Bad(irg);
+ if (i == irn_arity) node = get_irg_bad(irg);
}
#endif
return node;
{
ir_node *oldn = n;
ir_graph *irg = get_irn_irg(n);
- ir_opcode iro = get_irn_opcode(n);
+ unsigned iro = get_irn_opcode(n);
ir_tarval *tv;
/* Always optimize Phi nodes: part of the construction. */
tv = computed_value(n);
if (tv != tarval_bad) {
ir_node *nw;
- int node_size;
+ size_t node_size;
/*
- * we MUST copy the node here temporary, because it's still needed
- * for DBG_OPT_CSTEVAL
+ * we MUST copy the node here temporarily, because it's still
+ * needed for DBG_OPT_CSTEVAL
*/
node_size = offsetof(ir_node, attr) + n->op->attr_size;
- oldn = alloca(node_size);
+ oldn = (ir_node*)alloca(node_size);
memcpy(oldn, n, node_size);
CLONE_ARR_A(ir_node *, oldn->in, n->in);
{
ir_tarval *tv;
ir_node *oldn = n;
- ir_opcode iro = get_irn_opcode(n);
+ unsigned iro = get_irn_opcode(n);
if (!get_opt_optimize() && !is_Phi(n)) return n;