/* Else, do it the old-fashioned way. */
ir_node *block;
- assert(get_irn_op(old)->opar != oparity_dynamic);
-
hook_turn_into_id(old);
block = old->in[0];
}
}
+ if (get_irn_op(old)->opar == oparity_dynamic) {
+ DEL_ARR_F(get_irn_in(old));
+ }
+
old->op = op_Id;
old->in = NEW_ARR_D (ir_node *, irg->obst, 2);
old->in[0] = block;
 * all Proj nodes to their predecessors and all
 * partBlocks to their MacroBlock header.
*/
-static void collect(ir_node *n, void *env) {
+static void collect_phiprojs_walker(ir_node *n, void *env) {
ir_node *pred;
(void) env;
if (is_Phi(n)) {
ir_node *block = get_nodes_block(n);
- set_Phi_next(n, get_Block_phis(block));
- set_Block_phis(block, n);
+ add_Block_phi(block, n);
} else if (is_Proj(n)) {
pred = n;
do {
/**
 * Walker callback: reset the link field of a node to NULL.
 * Since Block Phi lists and Phi-next chains are stored in the link
 * field, clearing every link also discards any stale Phi lists
 * before they are rebuilt by the collect walker.
 */
-static void clear_links(ir_node *n, void *env) {
+static void clear_node_and_phis_links(ir_node *n, void *env) {
	(void) env;
	set_irn_link(n, NULL);
}
/* Walk the whole graph: one callback clears every node's link field,
 * the other records each Phi in its block's Phi list (add_Block_phi)
 * and chains Proj nodes to their predecessors — see the walkers above. */
void collect_phiprojs(ir_graph *irg) {
-	irg_walk_graph(irg, clear_links, collect, NULL);
+	irg_walk_graph(irg, clear_node_and_phis_links, collect_phiprojs_walker, NULL);
}
-
/*--------------------------------------------------------------------*/
/* Functionality for part_block */
/*--------------------------------------------------------------------*/
ir_node *old_block;
ir_node *phi;
ir_node *mbh;
+ ir_node *next, *block;
/* Turn off optimizations so that blocks are not merged again. */
int rem_opt = get_opt_optimize();
if (mbh != old_block) {
/* we are splitting a partBlock */
- set_irn_n(new_block, -1, mbh);
+ set_Block_MacroBlock(new_block, mbh);
} else {
/* we are splitting a header: this creates a new header */
- set_irn_n(new_block, -1, new_block);
+ set_Block_MacroBlock(new_block, new_block);
}
set_irg_current_block(current_ir_graph, new_block);
{
/* rewire partBlocks */
if (mbh != old_block) {
- ir_node *next, *block = get_irn_link(mbh);
+ ir_node *list = NULL;
+
+ /* move blocks from mbh to old_block if old_block dominates them */
+ block = get_irn_link(mbh);
set_irn_link(mbh, NULL);
- set_irn_link(old_block, NULL);
+ set_Block_MacroBlock(old_block, old_block);
/* note that we must splice the list of partBlock here */
for (; block != NULL; block = next) {
assert(is_Block(curr));
next = get_irn_link(block);
+
+ if (block == old_block)
+ continue;
+
assert(get_Block_MacroBlock(curr) == mbh);
for (;;) {
if (curr == old_block) {
/* old_block dominates the block, so old_block will be
the new macro block header */
- set_irn_n(block, -1, old_block);
- set_irn_link(block, get_irn_link(old_block));
- set_irn_link(old_block, block);
+ set_Block_MacroBlock(block, old_block);
+ set_irn_link(block, list);
+ list = block;
break;
}
if (curr == mbh) {
curr = get_Block_cfgpred_block(curr, 0);
}
}
+ /* beware: do NOT directly manipulate old_block's list, as old_block is
+ in mbh's list and this would destroy the list! */
+ set_irn_link(old_block, list);
+
+ /* finally add new_block to mbh's list */
+ set_irn_link(new_block, get_irn_link(mbh));
+ set_irn_link(mbh, new_block);
+ } else {
+ /* move blocks from mbh to new_block */
+ block = get_irn_link(mbh);
+
+ set_irn_link(mbh, NULL);
+ set_irn_link(new_block, NULL);
+
+ for (; block != NULL; block = next) {
+ next = get_irn_link(block);
+
+ set_Block_MacroBlock(block, new_block);
+ set_irn_link(block, get_irn_link(new_block));
+ set_irn_link(new_block, block);
+ }
}
set_optimize(rem_opt);