static INLINE int using_irn_link(const ir_graph *irg) { (void) irg; return 0; }
#endif
+/** Normalization: Move Proj nodes into the same block as their predecessors */
+void normalize_proj_nodes(ir_graph *irg);
+
/** set a description for local value n */
void set_irg_loc_description(ir_graph *irg, int n, void *description);
*/
void set_generic_function_ptr(ir_op *op, op_func func);
-/**
- * The get_nodes_block operation.
- * This operation returns the block of a node.
- * For block nodes, it returns its Macroblock header.
- */
-typedef ir_node *(*get_block_func)(const ir_node *self);
-
-/**
- * The set_nodes_block operation.
- * This operation sets the block of a node.
- */
-typedef void (*set_block_func)(ir_node *self, ir_node *block);
-
/**
* The compute value operation.
* This operation evaluates an IR node into a tarval if possible,
* io_op Operations.
*/
typedef struct {
- get_block_func get_block; /**< Return the block of a node. */
- set_block_func set_block; /**< Sets the block of a node. */
- computed_value_func computed_value; /**< Evaluates a node into a tarval if possible. */
- equivalent_node_func equivalent_node; /**< Optimizes the node by returning an equivalent one. */
- transform_node_func transform_node; /**< Optimizes the node by transforming it. */
- node_cmp_attr_func node_cmp_attr; /**< Compares two node attributes. */
- reassociate_func reassociate; /**< Reassociate a tree. */
- copy_attr_func copy_attr; /**< Copy node attributes. */
- get_type_func get_type; /**< Return the type of a node. */
- get_type_attr_func get_type_attr; /**< Return the type attribute of a node. */
- get_entity_attr_func get_entity_attr; /**< Return the entity attribute of a node. */
- verify_node_func verify_node; /**< Verify the node. */
- verify_proj_node_func verify_proj_node; /**< Verify the Proj node. */
- dump_node_func dump_node; /**< Dump a node. */
- op_func generic; /**< A generic function. */
+ computed_value_func computed_value; /**< evaluates a node into a tarval if possible. */
+ equivalent_node_func equivalent_node; /**< optimizes the node by returning an equivalent one. */
+ transform_node_func transform_node; /**< optimizes the node by transforming it. */
+ node_cmp_attr_func node_cmp_attr; /**< compares two node attributes. */
+ reassociate_func reassociate; /**< reassociate a tree */
+ copy_attr_func copy_attr; /**< copy node attributes */
+ get_type_func get_type; /**< return the type of a node */
+ get_type_attr_func get_type_attr; /**< return the type attribute of a node */
+ get_entity_attr_func get_entity_attr; /**< return the entity attribute of a node */
+ verify_node_func verify_node; /**< verify the node */
+ verify_proj_node_func verify_proj_node; /**< verify the Proj node */
+ dump_node_func dump_node; /**< dump a node */
+ op_func generic; /**< a generic function */
} ir_op_ops;
/**
if (node->op == op_Proj)
pred = get_irn_n(node, 0);
else
- pred = get_nodes_block(node);
+ pred = get_irn_n(node, -1);
if (pred->visited < current_ir_graph->visited)
my_irg_walk_2_both(pred, pre, post, env);
}
/* Return the extended block of a node. */
ir_extblk *get_nodes_extbb(ir_node *node) {
- ir_node *block = is_Block(node) ? node : get_nodes_block(node);
+ ir_node *block = is_Block(node) ? node : get_irn_n(node, -1);
return get_Block_extbb(block);
}
if (get_irn_mode(bl->out[i]) == mode_X) {
/* ignore End if we are in the Endblock */
if (get_irn_op(bl->out[i]) == op_End &&
- get_nodes_block(bl->out[i]) == bl)
+ get_irn_n(bl->out[i], -1) == bl)
continue;
else
++n_cfg_outs;
if (get_irn_mode(bl->out[i]) == mode_X) {
/* ignore End if we are in the Endblock */
if (get_irn_op(bl->out[i]) == op_End &&
- get_nodes_block(bl->out[i]) == bl)
+ get_irn_n(bl->out[i], -1) == bl)
continue;
if (out_pos == pos) {
ir_node *cfop = bl->out[i];
/* handle keep-alive here */
if (get_irn_op(cfop) == op_End)
- return get_nodes_block(cfop);
+ return get_irn_n(cfop, -1);
return cfop->out[1];
} else
++out_pos;
}
/*--------------------------------------------------------------------*/
-/** Building and Removing the out datastructure **/
+/** Building and Removing the out datastructure **/
/** **/
/** The outs of a graph are allocated in a single, large array. **/
/** This allows to allocate and deallocate the memory for the outs **/
/** Returns the amount of out edges for not yet visited successors. */
static int _count_outs(ir_node *n) {
- int i, res, irn_arity;
+ int start, i, res, irn_arity;
mark_irn_visited(n);
n->out = (ir_node **) 1; /* Space for array size. */
+ start = is_Block(n) ? 0 : -1;
irn_arity = get_irn_arity(n);
- res = irn_arity + 1;
+ res = irn_arity - start + 1; /* --1 or --0; 1 for array size. */
- if (is_no_Block(n)) {
- ir_node *pred = get_nodes_block(n);
-
- /* count outs for predecessors */
- if (irn_not_visited(pred))
- res += _count_outs(pred);
-
- /* Count my outs */
- pred->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(pred->out) + 1);
- ++res;
- }
- for (i = 0; i < irn_arity; ++i) {
+ for (i = start; i < irn_arity; ++i) {
/* Optimize Tuples. They annoy if walking the cfg. */
ir_node *pred = skip_Tuple(get_irn_n(n, i));
set_irn_n(n, i, pred);
- /* count outs for predecessors */
+    /* count outs for predecessors */
if (irn_not_visited(pred))
res += _count_outs(pred);
* @return The next free address
*/
static ir_node **_set_out_edges(ir_node *n, ir_node **free) {
- int n_outs, i, irn_arity;
+ int n_outs, start, i, irn_arity;
ir_node *pred;
set_irn_visited(n, get_irg_visited(current_ir_graph));
edge. */
n->out[0] = (ir_node *)0;
- if (is_no_Block(n)) {
- pred = get_nodes_block(n);
- /* Recursion */
- if (get_irn_visited(pred) < get_irg_visited(current_ir_graph))
- free = _set_out_edges(pred, free);
- /* Remember our back edge */
- pred->out[get_irn_n_outs(pred)+1] = n;
- pred->out[0] = INT_TO_PTR(get_irn_n_outs(pred) + 1);
- }
-
+ start = is_Block(n) ? 0 : -1;
irn_arity = get_irn_arity(n);
- for (i = 0; i < irn_arity; ++i) {
+
+ for (i = start; i < irn_arity; ++i) {
pred = get_irn_n(n, i);
/* Recursion */
if (get_irn_visited(pred) < get_irg_visited(current_ir_graph))
* which is saved in "env"
*/
static void node_arity_count(ir_node * node, void * env) {
- int *anz = (int *) env, arity, n_outs, i;
+ int *anz = (int *) env, arity, n_outs, i, start;
ir_node *succ;
arity = get_irn_arity(node);
- n_outs = 1 + arity;
-
- if (is_no_Block(node)) {
- succ = get_nodes_block(node);
- succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1);
+ start = (is_Block(node)) ? 0 : -1;
- ++n_outs;
- }
+	n_outs = 1 + arity + (-start); /* non-Block nodes have the extra -1 (block) input; +1 for the array-size slot */
*anz += n_outs;
- for (i = 0; i < arity; i++) {
+
+ for(i = start; i < arity; i++) {
succ = get_irn_n(node, i);
succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1);
}
static void set_out_pointer(ir_node * node, void *env) {
int i, arity = get_irn_arity(node);
ir_node *succ;
+ int start = (!is_Block(node)) ? -1 : 0;
(void) env;
- if (is_no_Block(node)) {
- succ = get_nodes_block(node);
- succ->out[get_irn_n_outs(succ)+1] = node;
- succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1);
-
- }
- for (i = 0; i < arity; ++i) {
+ for (i = start; i < arity; ++i) {
succ = get_irn_n(node, i);
succ->out[get_irn_n_outs(succ)+1] = node;
succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1);
which may be wrong. Add Conv's then. */
mode = get_irn_mode(args[i]);
if (mode != get_irn_mode(repl)) {
- repl = new_r_Conv(irg, get_nodes_block(repl), repl, mode);
+ repl = new_r_Conv(irg, get_irn_n(repl, -1), repl, mode);
}
exchange(args[i], repl);
}
/* set the current graph (this is important for several firm functions) */
current_ir_graph = irg;
+ /* Normalize proj nodes. */
+ normalize_proj_nodes(irg);
+
/* we do this before critical edge split. As this produces less returns,
because sometimes (= 164.gzip) multiple returns are slower */
normalize_n_returns(irg);
NULL,
NULL,
NULL,
- NULL,
- NULL,
copy_attr,
NULL,
NULL,
}
}
-/* FIXME: is this still correct:? Proj's are neither scheduled anymore nor they have a block ... */
static void
clean_remat_info(spill_ilp_t * si)
{
#ECC="/ben/beck/ipd/bin/eccp -march=arm -bra-chordal-co-algo=heur"
ECC_CFLAGS="${ADDCFLAGS} -v -O3 -D__builtin_memcpy=memcpy -D__builtin_memset=memset -D__builtin_strlen=strlen -D__builtin_strcpy=strcpy -D__builtin_strcmp=strcmp -DNO_TRAMPOLINES -ffp-strict"
GCC="gcc"
-GCC_CFLAGS="-O0 -Itcc"
+GCC_CFLAGS="-O0 -Itcc -m32"
LINKFLAGS="-lm"
TIMEOUT_COMPILE=300
TIMEOUT_RUN=30
return irn;
if (get_irn_op(irn) == op_Mul && mode_is_int(mode)) {
- ir_node *block = get_nodes_block(irn);
+ ir_node *block = get_irn_n(irn, -1);
ir_node *left = get_binop_left(irn);
ir_node *right = get_binop_right(irn);
tarval *tv = NULL;
static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) {
dbg_info *dbg = get_irn_dbg_info(div);
ir_node *n = get_binop_left(div);
- ir_node *block = get_nodes_block(div);
+ ir_node *block = get_irn_n(div, -1);
ir_mode *mode = get_irn_mode(n);
int bits = get_mode_size_bits(mode);
ir_node *q, *t, *c;
left = get_Div_left(irn);
mode = get_irn_mode(left);
- block = get_nodes_block(irn);
+ block = get_irn_n(irn, -1);
dbg = get_irn_dbg_info(irn);
bits = get_mode_size_bits(mode);
left = get_Mod_left(irn);
mode = get_irn_mode(left);
- block = get_nodes_block(irn);
+ block = get_irn_n(irn, -1);
dbg = get_irn_dbg_info(irn);
bits = get_mode_size_bits(mode);
n = (bits + 7) / 8;
left = get_DivMod_left(irn);
mode = get_irn_mode(left);
- block = get_nodes_block(irn);
+ block = get_irn_n(irn, -1);
dbg = get_irn_dbg_info(irn);
bits = get_mode_size_bits(mode);
*/
static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
if (dump_anchors) {
- irg_walk_anchors(irg, pre, post, env);
+ int i;
+
+ if (pre)
+ pre(irg->anchor, env);
+
+ for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+ ir_node *n = get_irg_anchor(irg, i);
+
+ if (n) {
+				/* reset the visit flag: will be increased in the walker */
+ set_irg_visited(irg, get_irg_visited(irg) - 1);
+ irg_walk(n, pre, post, env);
+ }
+ }
+ if (post)
+ post(irg->anchor, env);
} else {
irg_walk_graph(irg, pre, post, env);
}
* do not use get_nodes_block() here, will fail
* if the irg is not pinned.
*/
- if (get_nodes_block(from) == get_nodes_block(get_irn_n(from, to)))
+ if (get_irn_n(from, -1) == get_irn_n(get_irn_n(from, to), -1))
fprintf(F, INTRA_DATA_EDGE_ATTR);
else
fprintf(F, INTER_DATA_EDGE_ATTR);
* do not use get_nodes_block() here, will fail
* if the irg is not pinned.
*/
- if (get_nodes_block(from) == get_nodes_block(get_irn_n(from, to)))
+ if (get_irn_n(from, -1) == get_irn_n(get_irn_n(from, to), -1))
fprintf(F, INTRA_MEM_EDGE_ATTR);
else
fprintf(F, INTER_MEM_EDGE_ATTR);
fprintf(F, KEEP_ALIVE_DF_EDGE_ATTR);
}
break;
- case iro_Anchor:
- fprintf(F, ANCHOR_EDGE_ATTR);
- break;
default:
if (is_Proj(from)) {
if (get_irn_mode(from) == mode_M)
if (get_irn_pinned(n) == op_pin_state_floats &&
get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats) {
fprintf(F, " node was pinned in ");
- dump_node_opcode(F, get_nodes_block(n));
- fprintf(F, " %ld\n", get_irn_node_nr(get_nodes_block(n)));
+ dump_node_opcode(F, get_irn_n(n, -1));
+ fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1)));
}
fprintf(F, " arity: %d\n", get_irn_intra_arity(n));
fprintf(F, " pred nodes: \n");
if (!is_Block(n)) {
fprintf(F, " -1: ");
- dump_node_opcode(F, get_nodes_block(n));
- fprintf(F, " %ld\n", get_irn_node_nr(get_nodes_block(n)));
+ dump_node_opcode(F, get_irn_n(n, -1));
+ fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1)));
}
for ( i = 0; i < get_irn_intra_arity(n); ++i) {
fprintf(F, " %d: %s ", i, is_intra_backedge(n, i) ? "be" : " ");
}
if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) {
- ir_node *bl_old = old_tgt ? get_nodes_block(skip_Proj(old_tgt)) : NULL;
+ /* do not use get_nodes_block() here, it fails when running unpinned */
+ ir_node *bl_old = old_tgt ? get_irn_n(skip_Proj(old_tgt), -1) : NULL;
ir_node *bl_tgt = NULL;
if (tgt)
- bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt));
+ bl_tgt = is_Bad(tgt) ? tgt : get_irn_n(skip_Proj(tgt), -1);
edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
}
if (get_irn_arity(node) == arity) {
/* keep old array */
} else {
- ir_node *block = get_nodes_block(node);
+ /* don't use get_nodes_block here, we allow turn_into_tuple for unpinned nodes */
+ ir_node *block = get_irn_n(node, -1);
/* Allocate new array, don't free old in_array, it's on the obstack. */
edges_node_deleted(node, current_ir_graph);
node->in = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity+1);
/* clear the new in array, else edge_notify tries to delete garbage */
memset(node->in, 0, (arity+1) * sizeof(node->in[0]));
- set_nodes_block(node, block);
+ set_irn_n(node, -1, block);
}
}
*/
static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl) {
int i, arity;
- ir_node *pred;
-
- /* move this node: Projs are moved automagically */
- if (! is_Proj(node))
- set_nodes_block(node, to_bl);
+ ir_node *proj, *pred;
+
+ /* move this node */
+ set_nodes_block(node, to_bl);
+
+ /* move its projs */
+ if (get_irn_mode(node) == mode_T) {
+ proj = get_irn_link(node);
+ while (proj) {
+ if (get_nodes_block(proj) == from_bl)
+ set_nodes_block(proj, to_bl);
+ proj = get_irn_link(proj);
+ }
+ }
/* recursion ... */
if (get_irn_op(node) == op_Phi) return;
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_block(n);
- set_nodes_block(nn, get_new_node(block));
+ set_irn_n(nn, -1, get_new_node(block));
j = 0;
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
assert(irn_not_visited(n));
mark_irn_visited(n);
-#ifndef CAN_PLACE_PROJS
- while (is_Proj(n)) {
- n = get_Proj_pred(n);
- mark_irn_visited(n);
- }
-#endif
-
/* Place floating nodes. */
if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_nodes_block(n);
+ ir_node *curr_block = get_irn_n(n, -1);
int in_dead_block = is_Block_unreachable(curr_block);
int depth = 0;
ir_node *b = NULL; /* The block to place this node in */
*/
if (! in_dead_block) {
if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_nodes_block(pred)))
+ is_Block_unreachable(get_irn_n(pred, -1)))
set_nodes_block(pred, curr_block);
}
place_floats_early(pred, worklist);
/* Because all loops contain at least one op_pin_state_pinned node, now all
our inputs are either op_pin_state_pinned or place_early() has already
been finished on them. We do not have any unfinished inputs! */
- pred_block = get_nodes_block(pred);
+ pred_block = get_irn_n(pred, -1);
if ((!is_Block_dead(pred_block)) &&
(get_Block_dom_depth(pred_block) > depth)) {
b = pred_block;
depth = get_Block_dom_depth(pred_block);
}
/* Avoid that the node is placed in the Start block */
- if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)
+ if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
&& get_irg_phase_state(current_ir_graph) != phase_backend) {
b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
assert(b != get_irg_start_block(current_ir_graph));
}
} else if (is_Phi(n)) {
ir_node *pred;
- ir_node *curr_block = get_nodes_block(n);
+ ir_node *curr_block = get_irn_n(n, -1);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* Phi nodes: move nodes from dead blocks into the effective use
* of the Phi-input if the Phi is not in a bad block.
*/
- pred = get_nodes_block(n);
+ pred = get_irn_n(n, -1);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_nodes_block(pred))) {
+ is_Block_unreachable(get_irn_n(pred, -1))) {
set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
}
waitq_put(worklist, pred);
}
} else {
ir_node *pred;
- ir_node *curr_block = get_nodes_block(n);
+ ir_node *curr_block = get_irn_n(n, -1);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* All other nodes: move nodes from dead blocks into the same block.
*/
- pred = get_nodes_block(n);
+ pred = get_irn_n(n, -1);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_nodes_block(pred))) {
+ is_Block_unreachable(get_irn_n(pred, -1))) {
set_nodes_block(pred, curr_block);
}
waitq_put(worklist, pred);
}
if (! block)
- block = get_nodes_block(producer);
+ block = get_irn_n(producer, -1);
} else {
assert(is_no_Block(consumer));
block = get_nodes_block(consumer);
dca = get_deepest_common_ancestor(succ, dca);
} else {
/* ignore if succ is in dead code */
- succ_blk = get_nodes_block(succ);
+ succ_blk = get_irn_n(succ, -1);
if (is_Block_unreachable(succ_blk))
continue;
dca = consumer_dom_dca(dca, succ, node);
return dca;
}
-#ifdef CAN_PLACE_PROJS
static void set_projs_block(ir_node *node, ir_node *block)
{
int i;
set_nodes_block(succ, block);
}
}
-#endif
/**
* Find the latest legal block for N and place N into the
(get_irn_mode(n) != mode_X)) {
/* Remember the early_blk placement of this block to move it
out of loop no further than the early_blk placement. */
- early_blk = get_nodes_block(n);
+ early_blk = get_irn_n(n, -1);
/*
* BEWARE: Here we also get code, that is live, but
if (dca != NULL) {
set_nodes_block(n, dca);
move_out_of_loops(n, early_blk);
-#ifdef CAN_PLACE_PROJS
if(get_irn_mode(n) == mode_T) {
set_projs_block(n, get_nodes_block(n));
}
-#endif
}
}
}
irg->fp_model = model;
}
+/**
+ * walker Start->End: places Proj nodes into the same block
+ * as it's predecessors
+ *
+ * @param n the node
+ * @param env ignored
+ */
+static void normalize_proj_walker(ir_node *n, void *env) {
+ (void) env;
+ if (is_Proj(n)) {
+ ir_node *pred = get_Proj_pred(n);
+ ir_node *block = get_nodes_block(pred);
+
+ set_nodes_block(n, block);
+ }
+}
+
+/* move Proj nodes into the same block as their predecessors */
+void normalize_proj_nodes(ir_graph *irg) {
+ irg_walk_graph(irg, NULL, normalize_proj_walker, NULL);
+ set_irg_outs_inconsistent(irg);
+}
+
/* set a description for local value n */
void set_irg_loc_description(ir_graph *irg, int n, void *description) {
assert(0 <= n && n < irg->n_loc);
static INLINE void
_set_irg_end_block(ir_graph *irg, ir_node *node) {
- /* FIXME: if this line is killed the whole graph collapse, why */
set_irn_n(irg->anchor, -1, node);
set_irn_n(irg->anchor, anchor_end_block, node);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_nodes_block(node);
+ ir_node *pred = get_irn_n(node, -1);
if (pred->visited < irg->visited)
cnt += irg_walk_2_pre(pred, pre, env);
}
set_irn_visited(node, irg->visited);
if (node->op != op_Block) {
- ir_node *pred = get_nodes_block(node);
+ ir_node *pred = get_irn_n(node, -1);
if (pred->visited < irg->visited)
cnt += irg_walk_2_post(pred, post, env);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_nodes_block(node);
+ ir_node *pred = get_irn_n(node, -1);
if (pred->visited < irg->visited)
cnt += irg_walk_2_both(pred, pre, post, env);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_nodes_block(node);
+ ir_node *pred = get_irn_n(node, -1);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_pre(pred, pre, env);
}
set_irn_visited(node, irg->visited);
if (node->op != op_Block) {
- ir_node *pred = get_nodes_block(node);
+ ir_node *pred = get_irn_n(node, -1);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_post(pred, post, env);
}
pre(node, env);
if (node->op != op_Block) {
- ir_node *pred = get_nodes_block(node);
+ ir_node *pred = get_irn_n(node, -1);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_both(pred, pre, post, env);
}
}
void
-set_irn_n(ir_node *node, int n, ir_node *in) {
+set_irn_n (ir_node *node, int n, ir_node *in) {
assert(node && node->kind == k_ir_node);
assert(-1 <= n);
assert(n < get_irn_arity(node));
node->in[n + 1] = in;
}
-int add_irn_n(ir_node *node, ir_node *in) {
+int add_irn_n(ir_node *node, ir_node *in)
+{
int pos;
ir_graph *irg = get_irn_irg(node);
}
int
-(get_irn_deps)(const ir_node *node) {
+(get_irn_deps)(const ir_node *node)
+{
return _get_irn_deps(node);
}
ir_node *
-(get_irn_dep)(const ir_node *node, int pos) {
+(get_irn_dep)(const ir_node *node, int pos)
+{
return _get_irn_dep(node, pos);
}
void
-(set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
+(set_irn_dep)(ir_node *node, int pos, ir_node *dep)
+{
_set_irn_dep(node, pos, dep);
}
-int add_irn_dep(ir_node *node, ir_node *dep) {
+int add_irn_dep(ir_node *node, ir_node *dep)
+{
int res = 0;
if (node->deps == NULL) {
/** manipulate fields of individual nodes **/
+/* this works for all except Block */
ir_node *
-(get_nodes_block)(const ir_node *node) {
- return _get_nodes_block(node);
+get_nodes_block(const ir_node *node) {
+ assert(node->op != op_Block);
+ assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
+ return get_irn_n(node, -1);
}
void
set_nodes_block(ir_node *node, ir_node *block) {
- node->op->ops.set_block(node, block);
+ assert(node->op != op_Block);
+ set_irn_n(node, -1, block);
}
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
* irg.
*/
if (! is_Block(node))
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
assert(get_irn_op(node) == op_Block);
return node->attr.block.irg;
}
int i, arity = get_irn_arity(n);
printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
if (!is_Block(n)) {
- ir_node *pred = get_nodes_block(n);
+ ir_node *pred = get_irn_n(n, -1);
printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
get_irn_node_nr(pred), (void *)pred);
}
return (get_kind(thing) == k_ir_node);
}
-static INLINE ir_node *_get_nodes_block(const ir_node *node) {
- assert(_is_ir_node(node));
- return node->op->ops.get_block(node);
-}
-
/**
* Gets the op of a node.
* Intern version for libFirm.
/* this section MUST contain all inline functions */
#define is_ir_node(thing) _is_ir_node(thing)
-#define get_nodes_block(node) _get_nodes_block(node)
#define get_irn_intra_arity(node) _get_irn_intra_arity(node)
#define get_irn_inter_arity(node) _get_irn_inter_arity(node)
#define get_irn_arity(node) _get_irn_arity(node)
* @return
* The operations.
*/
-static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops) {
+static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops)
+{
#define CASE(a) \
case iro_##a: \
ops->computed_value = computed_value_##a; \
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
/* Turn Quot into a tuple (mem, jmp, bad, a) */
ir_node *mem = get_Quot_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
/* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */
ir_node *a = get_DivMod_left(n);
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
if (op == op_Load) {
/* get the Load address */
ir_node *addr = get_Load_ptr(a);
- ir_node *blk = get_nodes_block(a);
+ ir_node *blk = get_irn_n(a, -1);
ir_node *confirm;
if (value_not_null(addr, &confirm)) {
} else if (op == op_Store) {
/* get the load/store address */
ir_node *addr = get_Store_ptr(a);
- ir_node *blk = get_nodes_block(a);
+ ir_node *blk = get_irn_n(a, -1);
ir_node *confirm;
if (value_not_null(addr, &confirm)) {
if (mode_is_num(mode)) {
if (a == b) {
- ir_node *block = get_nodes_block(n);
+ ir_node *block = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n),
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
- get_nodes_block(n),
+ get_irn_n(n, -1),
b,
get_Minus_op(a),
mode);
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
- get_nodes_block(n),
+ get_irn_n(n, -1),
a,
get_Minus_op(b),
mode);
ir_node *mb = get_Mul_right(a);
if (b == ma) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
} else if (b == mb) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
mb,
ir_node *mb = get_Mul_right(b);
if (a == ma) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
} else if (a == mb) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
mb,
else if (is_Not(a) && classify_Const(b) == CNST_ONE) {
/* ~x + 1 = -x */
ir_node *op = get_Not_op(a);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
}
if (left == b) {
if (mode != get_irn_mode(right)) {
/* This Sub is an effective Cast */
- right = new_r_Conv(get_irn_irg(n), get_nodes_block(n), right, mode);
+ right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode);
}
n = right;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
} else if (right == b) {
if (mode != get_irn_mode(left)) {
/* This Sub is an effective Cast */
- left = new_r_Conv(get_irn_irg(n), get_nodes_block(n), left, mode);
+ left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode);
}
n = left;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
n = new_rd_Minus(
get_irn_dbg_info(n),
current_ir_graph,
- get_nodes_block(n),
+ get_irn_n(n, -1),
b,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
ir_node *mb = get_Mul_right(a);
if (ma == b) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n),
current_ir_graph, blk,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
} else if (mb == b) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
get_irn_dbg_info(n),
current_ir_graph, blk,
} else if (get_irn_op(a) == op_Sub) {
ir_node *x = get_Sub_left(a);
ir_node *y = get_Sub_right(a);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
ir_mode *m_b = get_irn_mode(b);
ir_mode *m_y = get_irn_mode(y);
ir_node *add;
else if (value_of(b) == get_mode_minus_one(mode))
r = a;
if (r) {
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), r, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), r, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
if (value != n) {
/* Turn Div into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
if (value != n) {
/* Turn Mod into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Mod_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
* not run it in the equivalent_node() context.
*/
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), a, mode);
+ get_irn_n(n, -1), a, mode);
DBG_OPT_CONFIRM(oldn, n);
} else if (sign == value_classified_positive) {
ir_mode *a_mode = get_irn_mode(a_op);
ir_mode *b_mode = get_irn_mode(b_op);
if(a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = exact_copy(n);
set_binop_left(n, a_op);
if (c != NULL) {
/* (a sop c) & (b sop c) => (a & b) sop c */
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
ir_node *new_n = exact_copy(n);
set_binop_left(new_n, op1);
n = new_rd_And(dbgi, irg, blk, new_n, c, mode);
} else {
n = exact_copy(a);
- set_nodes_block(n, blk);
+ set_irn_n(n, -1, blk);
set_binop_left(n, new_n);
set_binop_right(n, c);
}
if (a == b) {
/* a ^ a = 0 */
- n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n),
+ n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1),
mode, get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
} else if ((mode == mode_b)
&& (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
&& (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
+ n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
&& (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) {
/* The Eor is a Not. Replace it by a Not. */
/* ????!!!Extend to bitfield 1111111. */
- n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b);
+ n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
} else {
&& (get_irn_mode(a) == mode_b)
&& (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
/* We negate a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
+ n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
return n;
if (op_a == op_Sub && classify_Const(get_Sub_right(a)) == CNST_ONE) {
/* ~(x-1) = -x */
ir_node *op = get_Sub_left(a);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
}
ir_node *op = get_Not_op(a);
ir_mode *mode = get_irn_mode(op);
tarval *tv = get_mode_one(mode);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
ir_type *tp = get_irn_type(n);
if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
- n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
+ n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
get_Const_tarval(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
} else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
- n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
+ n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred),
get_SymConst_kind(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
}
proj_nr = get_Proj_proj(proj);
switch (proj_nr) {
case pn_Div_X_regular:
- return new_r_Jmp(current_ir_graph, get_nodes_block(div));
+ return new_r_Jmp(current_ir_graph, get_irn_n(div, -1));
case pn_Div_X_except:
/* we found an exception handler, remove it */
switch (proj_nr) {
case pn_Mod_X_regular:
- return new_r_Jmp(current_ir_graph, get_nodes_block(mod));
+ return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1));
case pn_Mod_X_except:
/* we found an exception handler, remove it */
switch (proj_nr) {
case pn_DivMod_X_regular:
- return new_r_Jmp(current_ir_graph, get_nodes_block(divmod));
+ return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
case pn_DivMod_X_except:
/* we found an exception handler, remove it */
}
if (changed) {
- ir_node *block = get_nodes_block(n); /* Beware of get_nodes_Block() */
+ ir_node *block = get_irn_n(n, -1); /* access block edge directly; get_nodes_block() is gone */
if (changed & 2) /* need a new Const */
right = new_Const(mode, tv);
in[i] = get_Confirm_value(pred);
}
/* move the Confirm nodes "behind" the Phi */
- block = get_nodes_block(phi);
+ block = get_irn_n(phi, -1);
new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi));
return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc);
}
}
/* ok, all conditions met */
- block = get_nodes_block(or);
+ block = get_irn_n(or, -1);
new_and = new_r_And(current_ir_graph, block,
value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
return or;
/* yet, condition met */
- block = get_nodes_block(or);
+ block = get_irn_n(or, -1);
n = new_r_Rot(current_ir_graph, block, x, c1, mode);
return or;
/* yet, condition met */
- block = get_nodes_block(or);
+ block = get_irn_n(or, -1);
/* a Rot Left */
n = new_r_Rot(current_ir_graph, block, x, v, mode);
if (flag) {
/* ok, we can replace it */
- ir_node *in[2], *irn, *block = get_nodes_block(n);
+ ir_node *in[2], *irn, *block = get_irn_n(n, -1);
in[0] = get_binop_left(left);
in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);
ir_node *t = get_Mux_true(n);
if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
- ir_node *block = get_nodes_block(n);
+ ir_node *block = get_irn_n(n, -1);
/*
* Note: normalization puts the constant on the right site,
ir_node *old = n;
n = identify(value_table, n);
- if (get_nodes_block(old) != get_nodes_block(n))
+ if (get_irn_n(old, -1) != get_irn_n(n, -1))
set_irg_pinned(current_ir_graph, op_pin_state_floats);
return n;
} /* identify_cons */
return optimize_in_place_2(n);
} /* optimize_in_place */
-/**
- * Return the block for all default nodes.
- */
-static ir_node *get_block_default(const ir_node *self) {
- return get_irn_n(self, -1);
-}
-
-/**
- * Sets the block for all default nodes.
- */
-static void set_block_default(ir_node *self, ir_node *blk) {
- set_irn_n(self, -1, blk);
-}
-
-/**
- * It's not allowed to get the block of a block. Anyway, returns
- * the macroblock header in release mode.
- */
-static ir_node *get_block_Block(const ir_node *self) {
- assert(!"get_nodes_block() called for a block");
- return get_irn_n(self, -1);
-}
-
-/**
- * It's not allowed to set the block of a block. In release mode sets
- * the macroblock header.
- */
-static void set_block_Block(ir_node *self, ir_node *blk) {
- assert(!"set_nodes_block() called for a block");
- set_irn_n(self, -1, blk);
-}
-
-/**
- * The anchor is always placed in the endblock or a graph.
- */
-static ir_node *get_block_Anchor(const ir_node *self) {
- return get_irn_n(self, anchor_end_block);
-}
-
-/**
- * It's forbidden to set the anchor block.
- */
-static void set_block_Anchor(ir_node *self, ir_node *blk) {
- (void) self;
- (void) blk;
- assert(!"set_nodes_block() called for the Anchor");
-}
-
-/**
- * Proj nodes are always in the block of it's predecessor.
- */
-static ir_node *get_block_Proj(const ir_node *self) {
- ir_node *pred = get_Proj_pred(self);
- return get_nodes_block(pred);
-}
-
-/**
- * Proj nodes silently ignore the block set request.
- */
-static void set_block_Proj(ir_node *self, ir_node *blk) {
- (void) self;
- (void) blk;
- assert(blk == get_block_Proj(self) && "trying to move Proj in another block!");
-}
-
-/**
- * Set the default get_block operation.
- */
-static ir_op_ops *firm_set_default_get_block(ir_opcode code, ir_op_ops *ops) {
-#define CASE(a) \
- case iro_##a: \
- ops->get_block = get_block_##a; \
- ops->set_block = set_block_##a; \
- break
-
- switch (code) {
- CASE(Block);
- CASE(Anchor);
-#ifndef CAN_PLACE_PROJS
- CASE(Proj);
-#endif
- default:
- /* not allowed to be NULL */
- if (! ops->get_block)
- ops->get_block = get_block_default;
- if (! ops->set_block)
- ops->set_block = set_block_default;
- }
-
- return ops;
-#undef CASE
-} /* firm_set_default_get_block */
-
/*
* Sets the default operation for an ir_ops.
*/
ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops) {
- ops = firm_set_default_get_block(code, ops);
ops = firm_set_default_computed_value(code, ops);
ops = firm_set_default_equivalent_node(code, ops);
ops = firm_set_default_transform_node(code, ops);
/* currently this checks fails for blocks with exception
outputs (and these are NOT basic blocks). So it is disabled yet. */
ASSERT_AND_RET_DBG(
- (pred_i == pred_j) || (get_nodes_block(pred_i) != get_nodes_block(pred_j)),
+ (pred_i == pred_j) || (get_irn_n(pred_i, -1) != get_irn_n(pred_j, -1)),
"At least two different PhiM predecessors are in the same block",
0,
- ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_nodes_block(pred_i))
+ ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
);
#endif
}
DB((dbg, LEVEL_1, "===> Performing condition evaluation on %+F\n", irg));
remove_critical_cf_edges(irg);
+ normalize_proj_nodes(irg);
edges_assure(irg);
set_using_irn_link(irg);
/* Phi always stop the recursion */
if (is_Phi(node))
- return get_nodes_block(node) == block;
+ return get_irn_intra_n(node, -1) == block;
if (! is_nice_value(node))
return 0;
/* Phi always stop the recursion */
if (is_Phi(node)) {
- if (get_nodes_block(node) == block)
+ if (get_irn_intra_n(node, -1) == block)
return get_Phi_pred(node, pos);
return node;
}
need_new = 0;
do {
ir_node *pred = get_irn_intra_n(node, i);
- ir_node *pred_blk = get_nodes_block(pred);
+ ir_node *pred_blk = get_irn_intra_n(pred, -1);
ir_node *leader = value_lookup(get_block_info(pred_blk)->avail_out, pred);
in[i] = translate(leader ? leader : pred, block, pos, env);
need_new |= (in[i] != pred);
struct obstack *old;
if (is_Phi(node)) {
- if (get_nodes_block(node) == block)
+ if (get_irn_intra_n(node, -1) == block)
return get_Phi_pred(node, pos);
return node;
}
/* check if the node has at least one Phi predecessor */
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_intra_n(node, i);
- ir_node *pred_bl = get_nodes_block(pred);
+ ir_node *pred_bl = get_irn_intra_n(pred, -1);
ir_node *leader = value_lookup(get_block_info(pred_bl)->avail_out, pred);
leader = leader != NULL ? leader : pred;
- if (is_Phi(leader) && get_nodes_block(pred) == block)
+ if (is_Phi(leader) && get_irn_intra_n(pred, -1) == block)
break;
}
if (i >= arity) {
node might depend on that. */
copy_node_attr(node, nn);
- set_nodes_block(nn, get_nodes_block(node));
+ set_irn_n(nn, -1, get_irn_intra_n(node, -1));
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_intra_n(node, i);
- ir_node *pred_bl = get_nodes_block(pred);
+ ir_node *pred_bl = get_irn_intra_n(pred, -1);
ir_node *leader = value_lookup(get_block_info(pred_bl)->avail_out, pred);
leader = leader != NULL ? leader : pred;
- if (is_Phi(leader) && get_nodes_block(pred) == block)
+ if (is_Phi(leader) && get_irn_intra_n(pred, -1) == block)
set_irn_n(nn, i, get_Phi_pred(leader, pos));
else
set_irn_n(nn, i, leader);
for (i = get_irn_intra_arity(n) - 1; i >= 0; --i) {
pred = get_irn_intra_n(n, i);
- pred_blk = get_nodes_block(pred);
+ pred_blk = get_irn_intra_n(pred, -1);
if (block_dominates(pred_blk, blk))
continue;
/* pred do not dominate it, but may be in the set */
a_env.end_block = get_irg_end_block(irg);
a_env.pairs = NULL;
+ /* Move Proj nodes into the same block as their arguments,
+ otherwise results would be assigned to the wrong blocks */
+ normalize_proj_nodes(irg);
+
/* critical edges MUST be removed */
remove_critical_cf_edges(irg);
irg_block_walk_graph(irg, AliasSetDestroyer, NULL, NULL);
obstack_free(&obst, NULL);
+ normalize_proj_nodes(irg);
irg_walk_graph(irg, NormaliseSync, NULL, NULL);
- optimize_graph_df(irg);
+ optimize_graph_df(irg);
irg_walk_graph(irg, NormaliseSync, NULL, NULL);
dump_ir_block_graph(irg, "-postfluffig");
}
DB((dbg, LEVEL_4, " = OVERFLOW"));
return NULL;
}
- return new_r_Const(current_ir_graph, get_nodes_block(rc), get_tarval_mode(tv), tv);
+ return new_r_Const(current_ir_graph, get_irn_n(rc, -1), get_tarval_mode(tv), tv);
}
return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(rc));
}
(void) env;
set_irn_link(irn, NULL);
- /* FIXME: must be removed but edges must be fixed first*/
if (is_Proj(irn)) {
ir_node *pred = get_Proj_pred(irn);
- set_irn_n(irn, -1, get_nodes_block(pred));
+ set_irn_n(irn, -1, get_irn_n(pred, -1));
}
}