static INLINE int using_irn_link(const ir_graph *irg) { (void) irg; return 0; }
#endif
-/** Normalization: Move Proj nodes into the same block as its predecessors */
-void normalize_proj_nodes(ir_graph *irg);
-
/** set a description for local value n */
void set_irg_loc_description(ir_graph *irg, int n, void *description);
if (node->op == op_Proj)
pred = get_irn_n(node, 0);
else
- pred = get_irn_n(node, -1);
+ pred = get_nodes_block(node);
if (pred->visited < current_ir_graph->visited)
my_irg_walk_2_both(pred, pre, post, env);
}
/* Return the extended block of a node. */
ir_extblk *get_nodes_extbb(ir_node *node) {
- ir_node *block = is_Block(node) ? node : get_irn_n(node, -1);
+ ir_node *block = is_Block(node) ? node : get_nodes_block(node);
return get_Block_extbb(block);
}
if (get_irn_mode(bl->out[i]) == mode_X) {
/* ignore End if we are in the Endblock */
if (get_irn_op(bl->out[i]) == op_End &&
- get_irn_n(bl->out[i], -1) == bl)
+ get_nodes_block(bl->out[i]) == bl)
continue;
else
++n_cfg_outs;
if (get_irn_mode(bl->out[i]) == mode_X) {
/* ignore End if we are in the Endblock */
if (get_irn_op(bl->out[i]) == op_End &&
- get_irn_n(bl->out[i], -1) == bl)
+ get_nodes_block(bl->out[i]) == bl)
continue;
if (out_pos == pos) {
ir_node *cfop = bl->out[i];
/* handle keep-alive here */
if (get_irn_op(cfop) == op_End)
- return get_irn_n(cfop, -1);
+ return get_nodes_block(cfop);
return cfop->out[1];
} else
++out_pos;
}
/*--------------------------------------------------------------------*/
-/** Building and Removing the out datasturcture **/
+/** Building and Removing the out datastructure **/
/** **/
/** The outs of a graph are allocated in a single, large array. **/
/** This allows to allocate and deallocate the memory for the outs **/
/** Returns the amount of out edges for not yet visited successors. */
static int _count_outs(ir_node *n) {
- int start, i, res, irn_arity;
+ int i, res, irn_arity;
mark_irn_visited(n);
n->out = (ir_node **) 1; /* Space for array size. */
- start = is_Block(n) ? 0 : -1;
irn_arity = get_irn_arity(n);
- res = irn_arity - start + 1; /* --1 or --0; 1 for array size. */
+ res = irn_arity + 1;
- for (i = start; i < irn_arity; ++i) {
+ if (is_no_Block(n)) {
+ ir_node *pred = get_nodes_block(n);
+
+ /* count outs for predecessors */
+ if (irn_not_visited(pred))
+ res += _count_outs(pred);
+
+ /* Count my outs */
+ pred->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(pred->out) + 1);
+ ++res;
+ }
+ for (i = 0; i < irn_arity; ++i) {
/* Optimize Tuples. They annoy if walking the cfg. */
ir_node *pred = skip_Tuple(get_irn_n(n, i));
set_irn_n(n, i, pred);
- /* count outs for successors */
+ /* count outs for predecessors */
if (irn_not_visited(pred))
res += _count_outs(pred);
* @return The next free address
*/
static ir_node **_set_out_edges(ir_node *n, ir_node **free) {
- int n_outs, start, i, irn_arity;
+ int n_outs, i, irn_arity;
ir_node *pred;
set_irn_visited(n, get_irg_visited(current_ir_graph));
edge. */
n->out[0] = (ir_node *)0;
- start = is_Block(n) ? 0 : -1;
- irn_arity = get_irn_arity(n);
+ if (is_no_Block(n)) {
+ pred = get_nodes_block(n);
+ /* Recursion */
+ if (get_irn_visited(pred) < get_irg_visited(current_ir_graph))
+ free = _set_out_edges(pred, free);
+ /* Remember our back edge */
+ pred->out[get_irn_n_outs(pred)+1] = n;
+ pred->out[0] = INT_TO_PTR(get_irn_n_outs(pred) + 1);
+ }
- for (i = start; i < irn_arity; ++i) {
+ irn_arity = get_irn_arity(n);
+ for (i = 0; i < irn_arity; ++i) {
pred = get_irn_n(n, i);
/* Recursion */
if (get_irn_visited(pred) < get_irg_visited(current_ir_graph))
* which is saved in "env"
*/
static void node_arity_count(ir_node * node, void * env) {
- int *anz = (int *) env, arity, n_outs, i, start;
+ int *anz = (int *) env, arity, n_outs, i;
ir_node *succ;
arity = get_irn_arity(node);
- start = (is_Block(node)) ? 0 : -1;
+ n_outs = 1 + arity;
- n_outs = 1 + arity + (-start); // ((is_Block(node)) ? 0 : 1); // Why + 1??
- *anz += n_outs;
+ if (is_no_Block(node)) {
+ succ = get_nodes_block(node);
+ succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1);
- for(i = start; i < arity; i++) {
+ ++n_outs;
+ }
+ *anz += n_outs;
+ for (i = 0; i < arity; i++) {
succ = get_irn_n(node, i);
succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1);
}
static void set_out_pointer(ir_node * node, void *env) {
int i, arity = get_irn_arity(node);
ir_node *succ;
- int start = (!is_Block(node)) ? -1 : 0;
(void) env;
- for (i = start; i < arity; ++i) {
+ if (is_no_Block(node)) {
+ succ = get_nodes_block(node);
+ succ->out[get_irn_n_outs(succ)+1] = node;
+ succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1);
+
+ }
+ for (i = 0; i < arity; ++i) {
succ = get_irn_n(node, i);
succ->out[get_irn_n_outs(succ)+1] = node;
succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1);
which may be wrong. Add Conv's then. */
mode = get_irn_mode(args[i]);
if (mode != get_irn_mode(repl)) {
- repl = new_r_Conv(irg, get_irn_n(repl, -1), repl, mode);
+ repl = new_r_Conv(irg, get_nodes_block(repl), repl, mode);
}
exchange(args[i], repl);
}
/* set the current graph (this is important for several firm functions) */
current_ir_graph = irg;
- /* Normalize proj nodes. */
- normalize_proj_nodes(irg);
-
/* we do this before critical edge split. As this produces less returns,
because sometimes (= 164.gzip) multiple returns are slower */
normalize_n_returns(irg);
}
}
+/* FIXME: is this still correct? Projs are no longer scheduled, nor do they have a block ... */
static void
clean_remat_info(spill_ilp_t * si)
{
return irn;
if (get_irn_op(irn) == op_Mul && mode_is_int(mode)) {
- ir_node *block = get_irn_n(irn, -1);
+ ir_node *block = get_nodes_block(irn);
ir_node *left = get_binop_left(irn);
ir_node *right = get_binop_right(irn);
tarval *tv = NULL;
static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) {
dbg_info *dbg = get_irn_dbg_info(div);
ir_node *n = get_binop_left(div);
- ir_node *block = get_irn_n(div, -1);
+ ir_node *block = get_nodes_block(div);
ir_mode *mode = get_irn_mode(n);
int bits = get_mode_size_bits(mode);
ir_node *q, *t, *c;
left = get_Div_left(irn);
mode = get_irn_mode(left);
- block = get_irn_n(irn, -1);
+ block = get_nodes_block(irn);
dbg = get_irn_dbg_info(irn);
bits = get_mode_size_bits(mode);
left = get_Mod_left(irn);
mode = get_irn_mode(left);
- block = get_irn_n(irn, -1);
+ block = get_nodes_block(irn);
dbg = get_irn_dbg_info(irn);
bits = get_mode_size_bits(mode);
n = (bits + 7) / 8;
left = get_DivMod_left(irn);
mode = get_irn_mode(left);
- block = get_irn_n(irn, -1);
+ block = get_nodes_block(irn);
dbg = get_irn_dbg_info(irn);
bits = get_mode_size_bits(mode);
*/
static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
if (dump_anchors) {
- int i;
-
- if (pre)
- pre(irg->anchor, env);
-
- for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
- ir_node *n = get_irg_anchor(irg, i);
-
- if (n) {
- /* reset the visit flag: will be increase in the walker */
- set_irg_visited(irg, get_irg_visited(irg) - 1);
- irg_walk(n, pre, post, env);
- }
- }
- if (post)
- post(irg->anchor, env);
+ irg_walk_anchors(irg, pre, post, env);
} else {
irg_walk_graph(irg, pre, post, env);
}
* do not use get_nodes_block() here, will fail
* if the irg is not pinned.
*/
- if (get_irn_n(from, -1) == get_irn_n(get_irn_n(from, to), -1))
+ if (get_nodes_block(from) == get_nodes_block(get_irn_n(from, to)))
fprintf(F, INTRA_DATA_EDGE_ATTR);
else
fprintf(F, INTER_DATA_EDGE_ATTR);
* do not use get_nodes_block() here, will fail
* if the irg is not pinned.
*/
- if (get_irn_n(from, -1) == get_irn_n(get_irn_n(from, to), -1))
+ if (get_nodes_block(from) == get_nodes_block(get_irn_n(from, to)))
fprintf(F, INTRA_MEM_EDGE_ATTR);
else
fprintf(F, INTER_MEM_EDGE_ATTR);
fprintf(F, KEEP_ALIVE_DF_EDGE_ATTR);
}
break;
+ case iro_Anchor:
+ fprintf(F, ANCHOR_EDGE_ATTR);
+ break;
default:
if (is_Proj(from)) {
if (get_irn_mode(from) == mode_M)
if (get_irn_pinned(n) == op_pin_state_floats &&
get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats) {
fprintf(F, " node was pinned in ");
- dump_node_opcode(F, get_irn_n(n, -1));
- fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1)));
+ dump_node_opcode(F, get_nodes_block(n));
+ fprintf(F, " %ld\n", get_irn_node_nr(get_nodes_block(n)));
}
fprintf(F, " arity: %d\n", get_irn_intra_arity(n));
fprintf(F, " pred nodes: \n");
if (!is_Block(n)) {
fprintf(F, " -1: ");
- dump_node_opcode(F, get_irn_n(n, -1));
- fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1)));
+ dump_node_opcode(F, get_nodes_block(n));
+ fprintf(F, " %ld\n", get_irn_node_nr(get_nodes_block(n)));
}
for ( i = 0; i < get_irn_intra_arity(n); ++i) {
fprintf(F, " %d: %s ", i, is_intra_backedge(n, i) ? "be" : " ");
}
if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) {
- /* do not use get_nodes_block() here, it fails when running unpinned */
- ir_node *bl_old = old_tgt ? get_irn_n(skip_Proj(old_tgt), -1) : NULL;
+ ir_node *bl_old = old_tgt ? get_nodes_block(skip_Proj(old_tgt)) : NULL;
ir_node *bl_tgt = NULL;
if (tgt)
- bl_tgt = is_Bad(tgt) ? tgt : get_irn_n(skip_Proj(tgt), -1);
+ bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt));
edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
}
if (get_irn_arity(node) == arity) {
/* keep old array */
} else {
- /* don't use get_nodes_block here, we allow turn_into_tuple for unpinned nodes */
- ir_node *block = get_irn_n(node, -1);
+ ir_node *block = get_nodes_block(node);
/* Allocate new array, don't free old in_array, it's on the obstack. */
edges_node_deleted(node, current_ir_graph);
node->in = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity+1);
/* clear the new in array, else edge_notify tries to delete garbage */
memset(node->in, 0, (arity+1) * sizeof(node->in[0]));
- set_irn_n(node, -1, block);
+ set_nodes_block(node, block);
}
}
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_block(n);
- set_irn_n(nn, -1, get_new_node(block));
+ set_nodes_block(nn, get_new_node(block));
j = 0;
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
/* Place floating nodes. */
if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_irn_n(n, -1);
+ ir_node *curr_block = get_nodes_block(n);
int in_dead_block = is_Block_unreachable(curr_block);
int depth = 0;
ir_node *b = NULL; /* The block to place this node in */
*/
if (! in_dead_block) {
if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1)))
+ is_Block_unreachable(get_nodes_block(pred)))
set_nodes_block(pred, curr_block);
}
place_floats_early(pred, worklist);
/* Because all loops contain at least one op_pin_state_pinned node, now all
our inputs are either op_pin_state_pinned or place_early() has already
been finished on them. We do not have any unfinished inputs! */
- pred_block = get_irn_n(pred, -1);
+ pred_block = get_nodes_block(pred);
if ((!is_Block_dead(pred_block)) &&
(get_Block_dom_depth(pred_block) > depth)) {
b = pred_block;
depth = get_Block_dom_depth(pred_block);
}
/* Avoid that the node is placed in the Start block */
- if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
+ if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)
&& get_irg_phase_state(current_ir_graph) != phase_backend) {
b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
assert(b != get_irg_start_block(current_ir_graph));
}
} else if (is_Phi(n)) {
ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
+ ir_node *curr_block = get_nodes_block(n);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* Phi nodes: move nodes from dead blocks into the effective use
* of the Phi-input if the Phi is not in a bad block.
*/
- pred = get_irn_n(n, -1);
+ pred = get_nodes_block(n);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
+ is_Block_unreachable(get_nodes_block(pred))) {
set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
}
waitq_put(worklist, pred);
}
} else {
ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
+ ir_node *curr_block = get_nodes_block(n);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* All other nodes: move nodes from dead blocks into the same block.
*/
- pred = get_irn_n(n, -1);
+ pred = get_nodes_block(n);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
+ is_Block_unreachable(get_nodes_block(pred))) {
set_nodes_block(pred, curr_block);
}
waitq_put(worklist, pred);
}
if (! block)
- block = get_irn_n(producer, -1);
+ block = get_nodes_block(producer);
} else {
assert(is_no_Block(consumer));
block = get_nodes_block(consumer);
dca = get_deepest_common_ancestor(succ, dca);
} else {
/* ignore if succ is in dead code */
- succ_blk = get_irn_n(succ, -1);
+ succ_blk = get_nodes_block(succ);
if (is_Block_unreachable(succ_blk))
continue;
dca = consumer_dom_dca(dca, succ, node);
(get_irn_mode(n) != mode_X)) {
/* Remember the early_blk placement of this block to move it
out of loop no further than the early_blk placement. */
- early_blk = get_irn_n(n, -1);
+ early_blk = get_nodes_block(n);
/*
* BEWARE: Here we also get code, that is live, but
irg->fp_model = model;
}
-/**
- * walker Start->End: places Proj nodes into the same block
- * as it's predecessors
- *
- * @param n the node
- * @param env ignored
- */
-static void normalize_proj_walker(ir_node *n, void *env) {
- (void) env;
- if (is_Proj(n)) {
- ir_node *pred = get_Proj_pred(n);
- ir_node *block = get_nodes_block(pred);
-
- set_nodes_block(n, block);
- }
-}
-
-/* move Proj nodes into the same block as its predecessors */
-void normalize_proj_nodes(ir_graph *irg) {
- irg_walk_graph(irg, NULL, normalize_proj_walker, NULL);
- set_irg_outs_inconsistent(irg);
-}
-
/* set a description for local value n */
void set_irg_loc_description(ir_graph *irg, int n, void *description) {
assert(0 <= n && n < irg->n_loc);
static INLINE void
_set_irg_end_block(ir_graph *irg, ir_node *node) {
+  /* FIXME: if this line is killed, the whole graph collapses -- why? */
set_irn_n(irg->anchor, -1, node);
set_irn_n(irg->anchor, anchor_end_block, node);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_irn_n(node, -1);
+ ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_2_pre(pred, pre, env);
}
set_irn_visited(node, irg->visited);
if (node->op != op_Block) {
- ir_node *pred = get_irn_n(node, -1);
+ ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_2_post(pred, post, env);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_irn_n(node, -1);
+ ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_2_both(pred, pre, post, env);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_irn_n(node, -1);
+ ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_pre(pred, pre, env);
}
set_irn_visited(node, irg->visited);
if (node->op != op_Block) {
- ir_node *pred = get_irn_n(node, -1);
+ ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_post(pred, post, env);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_irn_n(node, -1);
+ ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_both(pred, pre, post, env);
}
* irg.
*/
if (! is_Block(node))
- node = get_irn_n(node, -1);
+ node = get_nodes_block(node);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
- node = get_irn_n(node, -1);
+ node = get_nodes_block(node);
assert(get_irn_op(node) == op_Block);
return node->attr.block.irg;
}
int i, arity = get_irn_arity(n);
printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
if (!is_Block(n)) {
- ir_node *pred = get_irn_n(n, -1);
+ ir_node *pred = get_nodes_block(n);
printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
get_irn_node_nr(pred), (void *)pred);
}
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
/* Turn Quot into a tuple (mem, jmp, bad, a) */
ir_node *mem = get_Quot_mem(n);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
/* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */
ir_node *a = get_DivMod_left(n);
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
if (op == op_Load) {
/* get the Load address */
ir_node *addr = get_Load_ptr(a);
- ir_node *blk = get_irn_n(a, -1);
+ ir_node *blk = get_nodes_block(a);
ir_node *confirm;
if (value_not_null(addr, &confirm)) {
} else if (op == op_Store) {
/* get the load/store address */
ir_node *addr = get_Store_ptr(a);
- ir_node *blk = get_irn_n(a, -1);
+ ir_node *blk = get_nodes_block(a);
ir_node *confirm;
if (value_not_null(addr, &confirm)) {
if (mode_is_num(mode)) {
if (a == b) {
- ir_node *block = get_irn_n(n, -1);
+ ir_node *block = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
- get_irn_n(n, -1),
+ get_nodes_block(n),
b,
get_Minus_op(a),
mode);
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
- get_irn_n(n, -1),
+ get_nodes_block(n),
a,
get_Minus_op(b),
mode);
ir_node *mb = get_Mul_right(a);
if (b == ma) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
} else if (b == mb) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
mb,
ir_node *mb = get_Mul_right(b);
if (a == ma) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
} else if (a == mb) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
mb,
else if (is_Not(a) && classify_Const(b) == CNST_ONE) {
/* ~x + 1 = -x */
ir_node *op = get_Not_op(a);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
}
if (left == b) {
if (mode != get_irn_mode(right)) {
/* This Sub is an effective Cast */
- right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode);
+ right = new_r_Conv(get_irn_irg(n), get_nodes_block(n), right, mode);
}
n = right;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
} else if (right == b) {
if (mode != get_irn_mode(left)) {
/* This Sub is an effective Cast */
- left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode);
+ left = new_r_Conv(get_irn_irg(n), get_nodes_block(n), left, mode);
}
n = left;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
n = new_rd_Minus(
get_irn_dbg_info(n),
current_ir_graph,
- get_irn_n(n, -1),
+ get_nodes_block(n),
b,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
ir_node *mb = get_Mul_right(a);
if (ma == b) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
current_ir_graph, blk,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
} else if (mb == b) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
current_ir_graph, blk,
} else if (get_irn_op(a) == op_Sub) {
ir_node *x = get_Sub_left(a);
ir_node *y = get_Sub_right(a);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
ir_mode *m_b = get_irn_mode(b);
ir_mode *m_y = get_irn_mode(y);
ir_node *add;
else if (value_of(b) == get_mode_minus_one(mode))
r = a;
if (r) {
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), r, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), r, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
if (value != n) {
/* Turn Div into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
if (value != n) {
/* Turn Mod into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Mod_mem(n);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
* not run it in the equivalent_node() context.
*/
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- get_irn_n(n, -1), a, mode);
+ get_nodes_block(n), a, mode);
DBG_OPT_CONFIRM(oldn, n);
} else if (sign == value_classified_positive) {
ir_mode *a_mode = get_irn_mode(a_op);
ir_mode *b_mode = get_irn_mode(b_op);
if(a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = exact_copy(n);
set_binop_left(n, a_op);
if (c != NULL) {
/* (a sop c) & (b sop c) => (a & b) sop c */
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
ir_node *new_n = exact_copy(n);
set_binop_left(new_n, op1);
n = new_rd_And(dbgi, irg, blk, new_n, c, mode);
} else {
n = exact_copy(a);
- set_irn_n(n, -1, blk);
+ set_nodes_block(n, blk);
set_binop_left(n, new_n);
set_binop_right(n, c);
}
if (a == b) {
/* a ^ a = 0 */
- n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1),
+ n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n),
mode, get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
} else if ((mode == mode_b)
&& (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
&& (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
+ n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
&& (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) {
/* The Eor is a Not. Replace it by a Not. */
/* ????!!!Extend to bitfield 1111111. */
- n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b);
+ n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
} else {
&& (get_irn_mode(a) == mode_b)
&& (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
/* We negate a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
+ n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
return n;
if (op_a == op_Sub && classify_Const(get_Sub_right(a)) == CNST_ONE) {
/* ~(x-1) = -x */
ir_node *op = get_Sub_left(a);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
}
ir_node *op = get_Not_op(a);
ir_mode *mode = get_irn_mode(op);
tarval *tv = get_mode_one(mode);
- ir_node *blk = get_irn_n(n, -1);
+ ir_node *blk = get_nodes_block(n);
ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
ir_type *tp = get_irn_type(n);
if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
- n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
+ n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
get_Const_tarval(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
} else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
- n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred),
+ n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
get_SymConst_kind(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
}
proj_nr = get_Proj_proj(proj);
switch (proj_nr) {
case pn_Div_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(div, -1));
+ return new_r_Jmp(current_ir_graph, get_nodes_block(div));
case pn_Div_X_except:
/* we found an exception handler, remove it */
switch (proj_nr) {
case pn_Mod_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1));
+ return new_r_Jmp(current_ir_graph, get_nodes_block(mod));
case pn_Mod_X_except:
/* we found an exception handler, remove it */
switch (proj_nr) {
case pn_DivMod_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
+ return new_r_Jmp(current_ir_graph, get_nodes_block(divmod));
case pn_DivMod_X_except:
/* we found an exception handler, remove it */
}
if (changed) {
- ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */
+ ir_node *block = get_nodes_block(n); /* Beware of get_nodes_Block() */
if (changed & 2) /* need a new Const */
right = new_Const(mode, tv);
in[i] = get_Confirm_value(pred);
}
/* move the Confirm nodes "behind" the Phi */
- block = get_irn_n(phi, -1);
+ block = get_nodes_block(phi);
new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi));
return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc);
}
}
/* ok, all conditions met */
- block = get_irn_n(or, -1);
+ block = get_nodes_block(or);
new_and = new_r_And(current_ir_graph, block,
value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
return or;
/* yet, condition met */
- block = get_irn_n(or, -1);
+ block = get_nodes_block(or);
n = new_r_Rot(current_ir_graph, block, x, c1, mode);
return or;
/* yet, condition met */
- block = get_irn_n(or, -1);
+ block = get_nodes_block(or);
/* a Rot Left */
n = new_r_Rot(current_ir_graph, block, x, v, mode);
if (flag) {
/* ok, we can replace it */
- ir_node *in[2], *irn, *block = get_irn_n(n, -1);
+ ir_node *in[2], *irn, *block = get_nodes_block(n);
in[0] = get_binop_left(left);
in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);
ir_node *t = get_Mux_true(n);
if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
- ir_node *block = get_irn_n(n, -1);
+ ir_node *block = get_nodes_block(n);
/*
* Note: normalization puts the constant on the right site,
ir_node *old = n;
n = identify(value_table, n);
- if (get_irn_n(old, -1) != get_irn_n(n, -1))
+ if (get_nodes_block(old) != get_nodes_block(n))
set_irg_pinned(current_ir_graph, op_pin_state_floats);
return n;
} /* identify_cons */
/* currently this checks fails for blocks with exception
outputs (and these are NOT basic blocks). So it is disabled yet. */
ASSERT_AND_RET_DBG(
- (pred_i == pred_j) || (get_irn_n(pred_i, -1) != get_irn_n(pred_j, -1)),
+ (pred_i == pred_j) || (get_nodes_block(pred_i) != get_nodes_block(pred_j)),
"At least two different PhiM predecessors are in the same block",
0,
- ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
+ ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_nodes_block(pred_i))
);
#endif
}
DB((dbg, LEVEL_1, "===> Performing condition evaluation on %+F\n", irg));
remove_critical_cf_edges(irg);
- normalize_proj_nodes(irg);
edges_assure(irg);
set_using_irn_link(irg);
/* Phi always stop the recursion */
if (is_Phi(node))
- return get_irn_intra_n(node, -1) == block;
+ return get_nodes_block(node) == block;
if (! is_nice_value(node))
return 0;
/* Phi always stop the recursion */
if (is_Phi(node)) {
- if (get_irn_intra_n(node, -1) == block)
+ if (get_nodes_block(node) == block)
return get_Phi_pred(node, pos);
return node;
}
need_new = 0;
do {
ir_node *pred = get_irn_intra_n(node, i);
- ir_node *pred_blk = get_irn_intra_n(pred, -1);
+ ir_node *pred_blk = get_nodes_block(pred);
ir_node *leader = value_lookup(get_block_info(pred_blk)->avail_out, pred);
in[i] = translate(leader ? leader : pred, block, pos, env);
need_new |= (in[i] != pred);
struct obstack *old;
if (is_Phi(node)) {
- if (get_irn_intra_n(node, -1) == block)
+ if (get_nodes_block(node) == block)
return get_Phi_pred(node, pos);
return node;
}
/* check if the node has at least one Phi predecessor */
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_intra_n(node, i);
- ir_node *pred_bl = get_irn_intra_n(pred, -1);
+ ir_node *pred_bl = get_nodes_block(pred);
ir_node *leader = value_lookup(get_block_info(pred_bl)->avail_out, pred);
leader = leader != NULL ? leader : pred;
- if (is_Phi(leader) && get_irn_intra_n(pred, -1) == block)
+ if (is_Phi(leader) && get_nodes_block(pred) == block)
break;
}
if (i >= arity) {
node might depend on that. */
copy_node_attr(node, nn);
- set_irn_n(nn, -1, get_irn_intra_n(node, -1));
+ set_nodes_block(nn, get_nodes_block(node));
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_intra_n(node, i);
- ir_node *pred_bl = get_irn_intra_n(pred, -1);
+ ir_node *pred_bl = get_nodes_block(pred);
ir_node *leader = value_lookup(get_block_info(pred_bl)->avail_out, pred);
leader = leader != NULL ? leader : pred;
- if (is_Phi(leader) && get_irn_intra_n(pred, -1) == block)
+ if (is_Phi(leader) && get_nodes_block(pred) == block)
set_irn_n(nn, i, get_Phi_pred(leader, pos));
else
set_irn_n(nn, i, leader);
for (i = get_irn_intra_arity(n) - 1; i >= 0; --i) {
pred = get_irn_intra_n(n, i);
- pred_blk = get_irn_intra_n(pred, -1);
+ pred_blk = get_nodes_block(pred);
if (block_dominates(pred_blk, blk))
continue;
/* pred do not dominate it, but may be in the set */
a_env.end_block = get_irg_end_block(irg);
a_env.pairs = NULL;
- /* Move Proj's into the same block as their args,
- else we would assign the result to wrong blocks */
- normalize_proj_nodes(irg);
-
/* critical edges MUST be removed */
remove_critical_cf_edges(irg);
irg_block_walk_graph(irg, AliasSetDestroyer, NULL, NULL);
obstack_free(&obst, NULL);
- normalize_proj_nodes(irg);
irg_walk_graph(irg, NormaliseSync, NULL, NULL);
- optimize_graph_df(irg);
+ optimize_graph_df(irg);
irg_walk_graph(irg, NormaliseSync, NULL, NULL);
dump_ir_block_graph(irg, "-postfluffig");
}
DB((dbg, LEVEL_4, " = OVERFLOW"));
return NULL;
}
- return new_r_Const(current_ir_graph, get_irn_n(rc, -1), get_tarval_mode(tv), tv);
+ return new_r_Const(current_ir_graph, get_nodes_block(rc), get_tarval_mode(tv), tv);
}
return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(rc));
}
(void) env;
set_irn_link(irn, NULL);
+  /* FIXME: must be removed, but the edges must be fixed first. */
if (is_Proj(irn)) {
ir_node *pred = get_Proj_pred(irn);
- set_irn_n(irn, -1, get_irn_n(pred, -1));
+ set_irn_n(irn, -1, get_nodes_block(pred));
}
}