Adapted End node.
Adapted Phi constructors so that memory phis are added.
Adapted Cond optimization: add block to End.
Adapted dead node elimination: compaction of End's predecessors
Adapted inlining: remember keepalives of inlined graph.
Added informative strings to asserts in irvrfy.
[r301]
res = optimize (res);
irn_vrfy (res);
+
+ /* Memory Phis in endless loops must be kept alive.
+ As we can't distinguish these easily we keep all of them alive. */
+ if ((res->op == op_Phi) && (mode == mode_M))
+ add_End_keepalive(irg->end, res);
return res;
}
} else {
res = optimize (res);
irn_vrfy (res);
+ /* Memory Phis in endless loops must be kept alive.
+ As we can't distinguish these easily we keep all of them alive. */
+ if ((res->op == op_Phi) && (mode == mode_M))
+ add_End_keepalive(irg->end, res);
}
return res;
current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
+inline void
+keep_alive (ir_node *ka)
+{
+ add_End_keepalive(current_ir_graph->end, ka);
+}
/** Useful access routines **/
/* Returns the current block of the current graph. To set the current
* void set_value (int pos, ir_node *value);
* ir_node *get_store (void);
* void set_store (ir_node *store);
- *
+ * void keep_alive (ir_node *ka)
*
* IR_NODES AND CONSTRUCTORS FOR IR_NODES
* =======================================
*
* Returns the node defining the actual store.
* Requires current_block to be set correctly.
+ *
+ *
+ * inline void keep_alive (ir_node *ka)
+ * ------------------------------------
+ *
+ * Keep this node alive because it is (might be) not in the control
+ * flow from Start to End. Adds the node to the list in the end
+ * node.
+ *
*****
*/
/* Write a store. */
void set_store (ir_node *store);
+/* keep this node alive even if End is not control-reachable from it */
+inline void keep_alive (ir_node *ka);
+
/** Useful access routines **/
/* Returns the current block of the current graph. To set the current
block use switch_block(). */
int edge_label = 1;
/* A compiler option to turn off dumping values of constant entities */
int const_entities = 1;
+/* A compiler option to dump the keep alive edges */
+int dump_keepalive = 1;
/* A global variable to record output of the Bad node. */
int Bad_dumped;
/* dump edges to our inputs */
void
dump_ir_data_edges(ir_node *n) {
- int i;
+ int i, max;
+
+ if ((get_irn_op(n) == op_End) && (!dump_keepalive))
+ return;
for (i = 0; i < get_irn_arity(n); i++) {
assert(get_irn_n(n, i));
void dump_constant_entity_values() {
const_entities = 0;
}
+
+void dump_keepalive_edges() {
+ dump_keepalive = 1;
+}
void dump_constant_entity_values();
+/****m* irdump/dump_keepalive_edges
+ *
+ * NAME
+ * dump_keepalive_edges
+ * SYNOPSIS
+ * void dump_keepalive_edges()
+ * FUNCTION
+ * Turns on dumping the edges from the End node to nodes to be kept
+ * alive
+ * INPUTS
+ * No inputs
+ * RESULT
+ * SEE ALSO
+ *
+ ***
+ */
+void dump_keepalive_edges();
+
+
# endif /* _IRDUMP_H_ */
set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
}
/* Now the new node is complete. We can add it to the hash table for cse. */
- /* add_identity (current_ir_graph->value_table, nn); */
add_identities (current_ir_graph->value_table, nn);
}
+/* Copies the graph recursively, compacts the keepalives of the end node. */
+void
+copy_graph () {
+ ir_node *oe, *ne; /* old end, new end */
+ ir_node *ka; /* keep alive */
+ int i;
+
+ oe = get_irg_end(current_ir_graph);
+ /* copy the end node by hand, allocate dynamic in array! */
+ ne = new_ir_node(current_ir_graph,
+ NULL,
+ op_End,
+ mode_X,
+ -1,
+ NULL);
+ /* Copy the attributes. Well, there might be some in the future... */
+ copy_attrs(oe, ne);
+ set_new_node(oe, ne);
+
+ /* copy the live nodes */
+ irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
+ /* copy_preds for the end node ... */
+ set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));
+
+ /** ... and now the keep alives. **/
+ /* First pick the not marked block nodes and walk them. We must pick these
+ first, as otherwise we will miss blocks reachable from Phis. */
+ for (i = 0; i < get_irn_arity(oe); i++) {
+ ka = get_irn_n(oe, i);
+ if ((get_irn_op(ka) == op_Block) &&
+ (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
+ /* We must keep the block alive and copy everything reachable */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+ irg_walk(ka, copy_node, copy_preds, NULL);
+ add_End_keepalive(ne, get_new_node(ka));
+ }
+ }
+
+ /* Now pick the Phis. Here we will keep all! */
+ for (i = 0; i < get_irn_arity(oe); i++) {
+ ka = get_irn_n(oe, i);
+ if ((get_irn_op(ka) == op_Phi)) {
+ if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
+ /* We didn't copy the Phi yet. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+ irg_walk(ka, copy_node, copy_preds, NULL);
+ }
+ add_End_keepalive(ne, get_new_node(ka));
+ }
+ }
+}
+
/* Copies the graph reachable from current_ir_graph->end to the obstack
- in current_ir_graph.
+ in current_ir_graph and fixes the environment.
Then fixes the fields in current_ir_graph containing nodes of the
graph. */
void
-copy_graph () {
+copy_graph_env () {
/* Not all nodes remembered in current_ir_graph might be reachable
from the end node. Assure their link is set to NULL, so that
we can test whether new nodes have been computed. */
inc_irg_block_visited(current_ir_graph);
/* copy the graph */
- irg_walk(get_irg_end(current_ir_graph), copy_node, copy_preds, NULL);
+ copy_graph();
/* fix the fields in current_ir_graph */
set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
irg->value_table = new_identities ();
/* Copy the graph from the old to the new obstack */
- copy_graph();
+ copy_graph_env();
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
graph. Both will end up being a tuple. **/
post_bl = get_nodes_Block(call);
set_irg_current_block(current_ir_graph, post_bl);
- /* XxMxPxP von Start + Parameter von Call */
+ /* XxMxPxP of Start + parameter of Call */
in[0] = new_Jmp();
in[1] = get_Call_mem(call);
in[2] = get_irg_frame(current_ir_graph);
}
/* visited is > than that of called graph. With this trick visited will
- remain unchanged so that an outer walker calling this inline will
- not visit the inlined nodes. */
+ remain unchanged so that an outer walker, e.g., one searching for call
+ nodes to inline, that invokes this inlining will not visit the inlined nodes. */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
/** Performing dead node elimination inlines the graph **/
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
+ /** archive keepalives **/
+ for (i = 0; i < get_irn_arity(end); i++)
+ add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
/** Collect control flow from Return blocks to post_calls block. Replace
Return nodes by Jump nodes. **/
Further it assumes that all Phi nodes in a block of current_ir_graph
are assembled in a "link" list in the link field of the corresponding
block nodes. Further assumes that all Proj nodes are in a "link" list
- in the nodes producing the tuple. Conserves this feature for the old
+ in the nodes producing the tuple. (This is only an optical feature
+ for the graph.) Conserves this feature for the old
nodes of the graph. This precondition can be established by a call to
- collect_phis(), see irgmod.h.
+ collect_phisprojs(), see irgmod.h.
Called_graph must be unequal to current_ir_graph. Will not inline
if they are equal.
Sets visited masterflag in curren_ir_graph to max of flag in current
irg_walk_2(get_nodes_Block(node), pre, post, env);
}
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
-/* printf(" "); DDMSG2(node); */
-/* printf(" "); DDMSG2(get_irn_n(node, i)); */
+ /* printf(" "); DDMSG2(node);
+ printf(" "); DDMSG2(get_irn_n(node, i)); */
irg_walk_2(get_irn_n(node, i), pre, post, env);
}
void (pre)(ir_node*, void*), void (post)(ir_node*, void*),
void *env)
{
+ ir_node *block, *pred;
+ int i;
+
assert(node);
inc_irg_block_visited(current_ir_graph);
- if (is_no_Block(node)) node = get_nodes_Block(node);
- assert(get_irn_opcode(node) == iro_Block);
- irg_block_walk_2(node, pre, post, env);
+ if (is_no_Block(node)) block = get_nodes_Block(node); else block = node;
+ assert(get_irn_opcode(block) == iro_Block);
+ irg_block_walk_2(block, pre, post, env);
+ /* keepalive: the endless loops ... */
+ if (get_irn_op(node) == op_End)
+ for (i = 0; i < get_irn_arity(node); i++) {
+ pred = get_irn_n(node, i);
+ if (get_irn_op(pred) == op_Block)
+ irg_block_walk_2(pred, pre, post, env);
+ }
+
return;
}
res->visited = 0;
res->link = NULL;
if (arity < 0) {
- res->in = NEW_ARR_F (ir_node *, 1);
+ res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
} else {
res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
assert (node->op == op_Block);
node->attr.block.graph_arr[pos+1] = value;
}
+
+inline void
+add_End_keepalive (ir_node *end, ir_node *ka) {
+ assert (end->op == op_End);
+ ARR_APP1 (ir_node *, end->in, ka);
+}
+
/*
> Implementing the case construct (which is where the constant Proj node is
> important) involves far more than simply determining the constant values.
inline void set_Block_graph_arr (ir_node *node, int pos, ir_node *value);
+inline void add_End_keepalive (ir_node *end, ir_node *ka);
+
/* We distinguish three kinds of Cond nodes. These can be distinguished
by the mode of the selector operand and an internal flag of type cond_kind.
First we distinguish binary Conds and switch Conds.
res->name = name;
res->attr_size = attr_size;
res->labeled = labeled; /* For vcg dumping.
- Set labeled = 1 if the edges shuld be
- enumarated, otherwise set labeled = 0. */
+ Set labeled = 1 if the edges should be
+ enumerated in vcg output, otherwise set
+ labeled = 0. */
return res;
}
struct ir_op {
opcode code;
ident *name;
- size_t attr_size;
- int labeled;
+ size_t attr_size; /* Space needed in memory for private attributes */
+ int labeled; /* Output edge labels on in-edges in vcg graph */
};
/* create a new ir operation */
set_Tuple_pred(n, 0, jmp);
set_Tuple_pred(n, 1, new_Bad());
}
+ /* We might generate an endless loop, so keep it alive. */
+ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
} else if (ta && (get_irn_mode(a) == mode_I) && (get_Cond_kind(n) == dense)) {
/* I don't want to allow Tuples smaller than the biggest Proj.
Also this tuple might get really big...
I generate the Jmp here, and remember it in link. Link is used
when optimizing Proj. */
set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
+ /* We might generate an endless loop, so keep it alive. */
+ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
} else if ( (get_irn_op(get_Cond_selector(n)) == op_Eor)
&& (get_irn_mode(get_Cond_selector(n)) == mode_b)
&& (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
case iro_Start:
assert (
/* Start: BB --> X x M x P x data1 x ... x datan */
- mymode == mode_T
+ mymode == mode_T && "Start node"
);
break;
case iro_Jmp:
assert (
/* Jmp: BB --> X */
- mymode == mode_X
+ mymode == mode_X && "Jmp node"
);
break;
case iro_Cond:
/* Cond: BB x b --> X x X */
(op1mode == mode_b
/* Cond: BB x Iu --> X^n */
- || op1mode == mode_I)
+ || op1mode == mode_I) && "Cond node"
);
assert (mymode == mode_T);
break;
op1mode = get_irn_mode(in[1]);
/* Return: BB x M x data1 x ... x datan --> X */
/* printf("mode: %s, code %s\n", ID_TO_STR(n->mode->name), ID_TO_STR(n->op->name));*/
- assert ( op1mode == mode_M ); /* operand M */
+ assert ( op1mode == mode_M && "Return node" ); /* operand M */
for (i=2; i < get_irn_arity(n); i++) {
- assert ( mode_is_data(get_irn_mode(in[i])) ); /* operand datai */
+ assert ( mode_is_data(get_irn_mode(in[i])) && "Return node"); /* operand datai */
};
assert ( mymode == mode_X ); /* result X */
/* Compare returned results with result types of method type */
assert (
/* Sel: BB x M x P --> X x M */
op1mode == mode_M && op2mode == mode_p
- && mymode == mode_T
+ && mymode == mode_T && "Raise node"
);
break;
case iro_Const:
assert (
/* Const: BB --> data */
- mode_is_data (mymode) ||
- mymode == mode_b /* we want boolean constants for static evaluation
- of Cmp. */
+ (mode_is_data (mymode) ||
+ mymode == mode_b) /* we want boolean constants for static evaluation */
+ && "Const node" /* of Cmp. */
);
break;
case iro_SymConst:
assert (
/* SymConst: BB --> Iu or
BB --> P */
- (mymode == mode_I) || (mymode == mode_p)
+ ((mymode == mode_I) || (mymode == mode_p)) && "SymConst node"
);
break;
case iro_Sel:
assert (
/* Sel: BB x M x P x Iu^n --> P */
op1mode == mode_M && op2mode == mode_p
- && mymode == mode_p
+ && mymode == mode_p && "Sel node"
);
for (i=3; i < get_irn_arity(n); i++) {
- assert (get_irn_mode(in[i]) == mode_I); }
+ assert (get_irn_mode(in[i]) == mode_I && "Sel node"); }
break;
case iro_Call:
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
/* Call: BB x M x P x data1 x ... x datan
--> M x datan+1 x ... x data n+m */
- assert ( op1mode == mode_M && op2mode == mode_p ); /* operand M x P */
+ assert ( op1mode == mode_M && op2mode == mode_p && "Call node"); /* operand M x P */
for (i=3; i < get_irn_arity(n); i++) {
- assert ( mode_is_data(get_irn_mode(in[i])) ); /* operand datai */
+ assert ( mode_is_data(get_irn_mode(in[i])) && "Call node"); /* operand datai */
};
assert ( mymode == mode_T ); /* result T */
/* Compare arguments of node with those of type */
op2mode = get_irn_mode(in[2]);
assert (
/* common Add: BB x num x num --> num */
- (mymode == op1mode && mymode == op2mode
- && mode_is_num(mymode))
- || /* Pointer Add: BB x P x Is --> P */
- (op1mode == mode_p && op2mode == mode_i && mymode == mode_p)
- || /* Pointer Add: BB x Is x P --> P */
- (op1mode == mode_i && op2mode == mode_p && mymode == mode_p)
+ ((mymode == op1mode && mymode == op2mode
+ && mode_is_num(mymode))
+ || /* Pointer Add: BB x P x Is --> P */
+ (op1mode == mode_p && op2mode == mode_i && mymode == mode_p)
+ || /* Pointer Add: BB x Is x P --> P */
+ (op1mode == mode_i && op2mode == mode_p && mymode == mode_p))
+ && "Add node"
);
if (op1mode == mode_p || op2mode == mode_p) {
/* BB x P x Is --> P or BB x Is x P --> P */
op2mode = get_irn_mode(in[2]);
assert (
/* common Sub: BB x num x num --> num */
- (mymode ==op1mode && mymode == op2mode
- && mode_is_num(op1mode))
- || /* Pointer Sub: BB x P x Is --> P */
- (op1mode == mode_p && op2mode == mode_i && mymode == mode_p)
- || /* Pointer Sub: BB x Is x P --> P */
- (op1mode == mode_i && op2mode == mode_p && mymode == mode_p)
- || /* Pointer Sub: BB x P x P --> Is */
- (op1mode == mode_p && op2mode == mode_p && mymode == mode_i)
+ ((mymode ==op1mode && mymode == op2mode
+ && mode_is_num(op1mode))
+ || /* Pointer Sub: BB x P x Is --> P */
+ (op1mode == mode_p && op2mode == mode_i && mymode == mode_p)
+ || /* Pointer Sub: BB x Is x P --> P */
+ (op1mode == mode_i && op2mode == mode_p && mymode == mode_p)
+ || /* Pointer Sub: BB x P x P --> Is */
+ (op1mode == mode_p && op2mode == mode_p && mymode == mode_i))
+ && "Sub node"
);
if (op1mode == mode_p && op2mode == mode_p) {
op_is_symmetric = 1; /* ArmRoq */
op1mode = get_irn_mode(in[1]);
assert (
/* Minus: BB x float --> float */
- op1mode == mymode && mode_is_float (op1mode)
+ op1mode == mymode && mode_is_float (op1mode) && "Minus node"
);
op_is_symmetric = 2;
break;
assert (
/* Mul: BB x num x num --> num */
mymode == op1mode && mymode == op2mode
- && mode_is_num (op1mode)
+ && mode_is_num (op1mode) && "Mul node"
);
op_is_symmetric = 2;
break;
assert (
/* Quot: BB x M x float x float --> M x X x float */
op1mode == mode_M && op2mode == op3mode
- && mode_is_float(op2mode) && mymode == mode_T
+ && mode_is_float(op2mode) && mymode == mode_T && "Quot node"
);
op_is_symmetric = 2;
break;
assert (
/* DivMod: BB x M x num x num --> M x X x Is x Is */
op1mode == mode_M && op2mode == op3mode
- && mode_is_num (op2mode) && mymode == mode_T
+ && mode_is_num (op2mode) && mymode == mode_T && "DivMod node"
);
op_is_symmetric = 1;
break;
assert (
/* Div or Mod: BB x M x num x num --> M x X x Is */
op1mode == mode_M && op2mode == op3mode &&
- mode_is_num (op2mode) && mymode == mode_T
+ mode_is_num (op2mode) && mymode == mode_T && "Div or Mod node"
);
op_is_symmetric = 1;
break;
op1mode = get_irn_mode(in[1]);
assert (
/* Abs: BB x num --> num */
- op1mode == mymode && mode_is_num (op1mode)
+ op1mode == mymode && mode_is_num (op1mode) && "Abs node"
);
op_is_symmetric = 2;
break;
assert(
/* And or Or or Eor: BB x int x int --> int */
mymode == op1mode && mymode == op2mode
- && mode_is_int (mymode)
+ && mode_is_int (mymode) && "And, Or or Eor node"
);
op_is_symmetric = 2;
break;
assert(
/* Not: BB x int --> int */
mymode == op1mode
- && mode_is_int (mymode)
+ && mode_is_int (mymode) && "Not node"
);
op_is_symmetric = 2;
break;
assert(
/* Cmp: BB x datab x datab --> b16 */
op1mode == op2mode && mode_is_data (op1mode)
- && mymode == mode_T
+ && mymode == mode_T && "Cmp node"
);
break;
case iro_Shl:
assert(
/* Shl, Shr, Shrs or Rot: BB x int x Iu --> int */
mode_is_int (op1mode) && op2mode == mode_I
- && op1mode == mymode
+ && op1mode == mymode && "Shl, Shr, Shr or Rot node"
);
break;
case iro_Conv:
assert(
/* Conv: BB x datab1 --> datab2 */
mode_is_datab (op1mode)
- && mode_is_data (mymode)
+ && mode_is_data (mymode) && "Conv node"
);
break;
case iro_Phi:
/* for some reason "<=" aborts. Is there a problem with get_store? */
for (i=1; i < get_irn_arity(n); i++) {
if (!is_Bad(in[i]))
- assert ( get_irn_mode(in[i]) == mymode );
+ assert ( get_irn_mode(in[i]) == mymode && "Phi node");
};
- assert ( mode_is_dataM(mymode) );
+ assert ( mode_is_dataM(mymode) && "Phi node");
break;
case iro_Load:
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
assert(
/* Load: BB x M x P --> M x X x data */
- op1mode == mode_M && op2mode == mode_p
+ op1mode == mode_M && op2mode == mode_p && "Load node"
);
- assert ( mymode == mode_T );
+ assert ( mymode == mode_T && "Load node");
break;
case iro_Store:
op1mode = get_irn_mode(in[1]);
assert(
/* Load: BB x M x P x data --> M x X */
op1mode == mode_M && op2mode == mode_p
- && mode_is_data (op3mode)
+ && mode_is_data (op3mode) && "Store node"
);
- assert(mymode == mode_T);
+ assert(mymode == mode_T && "Store node");
break;
case iro_Alloc:
op1mode = get_irn_mode(in[1]);
assert(
/* Alloc: BB x M x Iu --> M x X x P */
op1mode == mode_M && op2mode == mode_I
- && mymode == mode_T
+ && mymode == mode_T && "Alloc node"
);
break;
case iro_Free:
assert(
/* Free: BB x M x P x Iu --> M */
op1mode == mode_M && op2mode == mode_p && op3mode == mode_I
- && mymode == mode_M
+ && mymode == mode_M && "Free node"
);
break;
case iro_Sync:
/* Sync: BB x M^n --> M */
for (i=1; i < get_irn_arity(n); i++) {
- assert ( get_irn_mode(in[i]) == mode_M );
+ assert ( get_irn_mode(in[i]) == mode_M && "Sync node");
};
- assert ( mymode == mode_M );
+ assert ( mymode == mode_M && "Sync node");
break;
case iro_Proj:
vrfy_Proj_proj(n);
}
}
-
void
vrfy_Proj_proj(ir_node *p) {
ir_node *pred;